diff --git a/.dockerignore b/.dockerignore index 3bb2cfa7a4f7..f5796e7a7bb5 100644 --- a/.dockerignore +++ b/.dockerignore @@ -39,6 +39,10 @@ cmd/clusterctl/clusterctl/** **/bin/** **/out/** +# go.work files +go.work +go.work.sum + # Test binary, build with `go test -c` **/*.test diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index 8087151a5160..000000000000 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -name: 🐛 Bug report -about: Tell us about a problem you are experiencing. -title: '' -labels: '' -assignees: '' - ---- - -**What steps did you take and what happened:** -[A clear and concise description on how to REPRODUCE the bug.] - - -**What did you expect to happen:** - - -**Anything else you would like to add:** -[Miscellaneous information that will assist in solving the issue.] - - -**Environment:** - -- Cluster-api version: -- minikube/kind version: -- Kubernetes version: (use `kubectl version`): -- OS (e.g. from `/etc/os-release`): - -/kind bug -[One or more /area label. See https://github.com/kubernetes-sigs/cluster-api/labels?q=area for the list of labels] diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml new file mode 100644 index 000000000000..befec552a825 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -0,0 +1,48 @@ +name: 🐛 Bug Report +description: Report a bug encountered while using Cluster API +body: + - type: textarea + id: problem + attributes: + label: What steps did you take and what happened? + description: | + Please provide as much info as possible. Not doing so may result in your bug not being addressed in a timely manner. + If this matter is security related, please follow the guidelines described in https://github.com/kubernetes-sigs/cluster-api/blob/main/SECURITY_CONTACTS + placeholder: "A clear and concise description on how to REPRODUCE the bug." + validations: + required: true + + - type: textarea + id: expected + attributes: + label: What did you expect to happen? + validations: + required: true + + - type: textarea + id: capiVersion + attributes: + label: Cluster API version + placeholder: "The version of the Cluster API used in the environment." + validations: + required: true + + - type: textarea + id: kubeVersion + attributes: + label: Kubernetes version + placeholder: "$kubectl version" + + - type: textarea + id: additional + attributes: + label: Anything else you would like to add? + placeholder: "Miscellaneous information that will assist in solving the issue." + + - type: textarea + id: templateLabel + attributes: + label: Label(s) to be applied + value: | + /kind bug + One or more /area label. See https://github.com/kubernetes-sigs/cluster-api/labels?q=area for the list of labels. diff --git a/.github/ISSUE_TEMPLATE/failing-test.md b/.github/ISSUE_TEMPLATE/failing-test.md deleted file mode 100644 index 99f4f5d26102..000000000000 --- a/.github/ISSUE_TEMPLATE/failing-test.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -name: 🚨 Failing Test -about: Report continuously failing tests or jobs in Cluster API CI -title: '' -labels: '' -assignees: '' - ---- - - - -**Which jobs are failing:** - -**Which tests are failing:** - -**Since when has it been failing:** - -**Testgrid link:** - -**Reason for failure (if possible):** - -**Anything else we need to know:** - -/kind failing-test - -[One or more /area label. 
See https://github.com/kubernetes-sigs/cluster-api/labels?q=area for the list of labels] \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/failing_test.yaml b/.github/ISSUE_TEMPLATE/failing_test.yaml new file mode 100644 index 000000000000..ab182697c2b1 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/failing_test.yaml @@ -0,0 +1,49 @@ +name: 🚨 Failing Test +description: Report continuously failing tests or jobs in Cluster API CI +body: + - type: textarea + id: jobs + attributes: + label: Which jobs are failing? + placeholder: | + Please only use this template for submitting reports about continuously failing tests or jobs in Cluster API CI. + validations: + required: true + + - type: textarea + id: tests + attributes: + label: Which tests are failing? + validations: + required: true + + - type: textarea + id: since + attributes: + label: Since when has it been failing? + validations: + required: true + + - type: input + id: testgrid + attributes: + label: Testgrid link + + - type: textarea + id: reason + attributes: + label: Reason for failure (if possible) + + - type: textarea + id: additional + attributes: + label: Anything else we need to know? + placeholder: "Miscellaneous information that will assist in fixing the failing test." + + - type: textarea + id: templateLabel + attributes: + label: Label(s) to be applied + value: | + /kind failing-test + One or more /area label. See https://github.com/kubernetes-sigs/cluster-api/labels?q=area for the list of labels. \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index 4404765eee2a..000000000000 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -name: ✨ Feature request -about: Suggest an idea for this project. -title: '' -labels: '' -assignees: '' - ---- - - - -**User Story** - -As a [developer/user/operator] I would like to [high level description] for [reasons] - -**Detailed Description** - -[A clear and concise description of what you want to happen.] - -**Anything else you would like to add:** - -[Miscellaneous information that will assist in solving the issue.] - -/kind feature diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml new file mode 100644 index 000000000000..880103411922 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yaml @@ -0,0 +1,35 @@ +name: ✨ Feature request +description: Suggest an idea for this project. +body: + - type: textarea + id: user_story + attributes: + label: What would you like to be added (User Story)? + description: | + A large proposal that works through the design along with the implications of the change can be opened as a CAEP. + See https://github.com/kubernetes-sigs/cluster-api/blob/main/CONTRIBUTING.md#proposal-process-caep + placeholder: "As a [developer/user/operator] I would like to [high level description] for [reasons]." + validations: + required: true + + - type: textarea + id: detailed_feature_description + attributes: + label: Detailed Description + placeholder: "A clear and concise description of what you want to happen." + validations: + required: true + + - type: textarea + id: additional + attributes: + label: Anything else you would like to add? + placeholder: "Miscellaneous information that will assist in solving the issue." + + - type: textarea + id: templateLabel + attributes: + label: Label(s) to be applied + value: | + /kind feature + One or more /area label. 
See https://github.com/kubernetes-sigs/cluster-api/labels?q=area for the list of labels. \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/flaking-test.md b/.github/ISSUE_TEMPLATE/flaking-test.md deleted file mode 100644 index 942910d27e8c..000000000000 --- a/.github/ISSUE_TEMPLATE/flaking-test.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -name: ❄️ Flaking Test -about: Report flaky tests or jobs in Cluster API CI -title: '' -labels: '' -assignees: '' - ---- - - - -**Which jobs are flaking:** - -**Which tests are flaking:** - -**Testgrid link:** - -**Reason for failure (if possible):** - -**Anything else we need to know:** -- links to go.k8s.io/triage appreciated -- links to specific failures in spyglass appreciated - - -/kind flake - -[One or more /area label. See https://github.com/kubernetes-sigs/cluster-api/labels?q=area for the list of labels] \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/flaking_test.yaml b/.github/ISSUE_TEMPLATE/flaking_test.yaml new file mode 100644 index 000000000000..407793002cd8 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/flaking_test.yaml @@ -0,0 +1,50 @@ +name: ❄️ Flaking Test +description: Report flaky tests or jobs in Cluster API CI +body: + - type: textarea + id: jobs + attributes: + label: Which jobs are flaking? + description: | + Please only use this template for submitting reports about flaky tests or jobs (pass or fail with no underlying change in code) in Cluster API CI. + Links to go.k8s.io/triage and/or links to specific failures in spyglass are appreciated. + validations: + required: true + + - type: textarea + id: tests + attributes: + label: Which tests are flaking? + validations: + required: true + + - type: textarea + id: since + attributes: + label: Since when has it been flaking? + validations: + required: true + + - type: input + id: testgrid + attributes: + label: Testgrid link + + - type: textarea + id: reason + attributes: + label: Reason for failure (if possible) + + - type: textarea + id: additional + attributes: + label: Anything else we need to know? + placeholder: "Miscellaneous information that will assist in fixing the flaking test." + + - type: textarea + id: templateLabel + attributes: + label: Label(s) to be applied + value: | + /kind flake + One or more /area label. See https://github.com/kubernetes-sigs/cluster-api/labels?q=area for the list of labels. \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/kubernetes_bump.md b/.github/ISSUE_TEMPLATE/kubernetes_bump.md index e9a37c8b5481..2bcc1093b038 100644 --- a/.github/ISSUE_TEMPLATE/kubernetes_bump.md +++ b/.github/ISSUE_TEMPLATE/kubernetes_bump.md @@ -11,9 +11,7 @@ This issue is tracking the tasks that should be implemented **after** the Kubern ## Tasks -Prerequisites: -* [ ] Decide which Cluster API release series will support the new Kubernetes version - * If feasible we usually cherry-pick the changes back to the latest release series. +**Note:** If feasible we usually cherry-pick the changes back to the latest release series. ### Supporting managing and running on the new Kubernetes version @@ -21,33 +19,62 @@ This section contains tasks to update our book, e2e testing and CI to use and te as well as changes to Cluster API that we might have to make to support the new Kubernetes version. All of these changes should be cherry-picked to all release series that will support the new Kubernetes version. 
-* [ ] Modify quickstart and CAPD to use the new Kubernetes release:
-  * Bump the Kubernetes version in:
-    * `test/*`: search for occurrences of the previous Kubernetes version
-    * `Tiltfile`
-  * Ensure the latest available kind version is used (including the latest images for this kind release)
-  * Verify the quickstart manually
-    * Prior art: #7156
-  * bump `InitWithKubernetesVersion` in `clusterctl_upgrade_test.go`
+* [ ] Continuously modify CAPD to use early versions of the upcoming Kubernetes release (betas and rcs):
+  * Bump the Kubernetes version in `test/*` except for `test/infrastructure/kind/*`.
+  * Prior art: #10384
+* [ ] Modify CAPD to use the new Kubernetes release after it is GA:
+  * Bump the Kubernetes version in `test/*` except for `test/infrastructure/kind/*`.
+  * Prior art: #10454
 * [ ] Ensure the jobs are adjusted to provide test coverage according to our [support policy](https://cluster-api.sigs.k8s.io/reference/versions.html#supported-kubernetes-versions):
-  * For the main branch and the release branch of the latest supported Cluster API minor release:
-    * Add new periodic upgrade job.
-    * Adjust presubmit jobs so that we have the latest upgrade jobs available on PRs.
-  * For the main branch:
-    * periodics & presubmits:
-      * Bump `KUBEBUILDER_ENVTEST_KUBERNETES_VERSION` of the `test-mink8s` jobs to the new minimum supported management cluster version.
-    * periodics:
-      * Bump `KUBERNETES_VERSION_MANAGEMENT` of the `e2e-mink8s` job to the new minimum supported management cluster version.
-  * Drop the oldest upgrade job as the oldest Kubernetes minor version is now out of support.
-  * Prior art: https://github.com/kubernetes/test-infra/pull/27421
+
+  * At the `.versions` section in the `cluster-api-prowjob-gen.yaml` file in [test-infra](https://github.com/kubernetes/test-infra/blob/master/config/jobs/kubernetes-sigs/cluster-api/):
+    * Add a new entry for the new Kubernetes version
+    * Adjust the entry for the released Kubernetes version to refer to `stable-1.` instead of `ci/latest-1.`
+    * Check and update the versions for the keys `etcd` and `coreDNS` if necessary:
+      * For etcd, see the `DefaultEtcdVersion` kubeadm constant: [e.g. for v1.28.0](https://github.com/kubernetes/kubernetes/blob/v1.28.0/cmd/kubeadm/app/constants/constants.go#L308)
+      * For CoreDNS, see the `CoreDNSVersion` kubeadm constant: [e.g. for v1.28.0](https://github.com/kubernetes/kubernetes/blob/v1.28.0/cmd/kubeadm/app/constants/constants.go#L344)
+  * For the `.branches.main` section in the `cluster-api-prowjob-gen.yaml` file in [test-infra](https://github.com/kubernetes/test-infra/blob/master/config/jobs/kubernetes-sigs/cluster-api/):
+    * For the `.upgrades` section:
+      * Drop the oldest upgrade
+      * Add a new upgrade entry from the previous to the new Kubernetes version
+    * Bump the version set at `.kubernetesVersionManagement` to the new minimum supported management cluster version (this is the version available as a kind image).
+    * Bump the version set at `.kubebuilderEnvtestKubernetesVersion` to the new minimum supported management cluster version.
+  * Run `make generate-test-infra-prowjobs` to generate the resulting prowjob configuration:
+
+    ```sh
+    TEST_INFRA_DIR=../../k8s.io/test-infra make generate-test-infra-prowjobs
+    ```
+
+  * Prior art: #32456
+
 * [ ] Update book:
   * Update supported versions in `versions.md`
-  * Update job documentation in `jobs.md`
-  * Prior art: #7194 #7196
+  * Prior art: #10454
+
 * [ ] Issues specific to the Kubernetes minor release:
   * Sometimes there are adjustments that we have to make in Cluster API
     to be able to support a new Kubernetes minor version. Please add these issues here when they are identified.

+### Bump quickstart and kind image references in CAPD
+
+Prerequisites:
+
+* The target Kubernetes version is GA
+* There is a new [kind version and/or a new set of kind images](https://github.com/kubernetes-sigs/kind/releases) for the target Kubernetes version
+
+* [ ] Bump quickstart and kind image references in CAPD:
+  * Bump the Kubernetes version in:
+    * `docs/*`
+    * `Tiltfile`
+  * Bump kind image references in CAPD (and also kind itself if necessary, including the latest images for this kind release)
+    * Add new images in the [kind mapper.go](https://github.com/kubernetes-sigs/cluster-api/blob/0f47a19e038ee6b0d3b1e7675a62cdaf84face8c/test/infrastructure/kind/mapper.go#L79).
+      * See the [kind releases page](https://github.com/kubernetes-sigs/kind/releases) for the list of released images.
+    * Set the new default image for the [test framework](https://github.com/kubernetes-sigs/cluster-api/blob/0f47a19e038ee6b0d3b1e7675a62cdaf84face8c/test/framework/bootstrap/kind_provider.go#L40)
+    * If code changes are required for CAPD to incorporate the new kind version, update [kind latestMode](https://github.com/kubernetes-sigs/cluster-api/blob/0f47a19e038ee6b0d3b1e7675a62cdaf84face8c/test/infrastructure/kind/mapper.go#L66)
+  * Verify the quickstart manually
+  * Prior art: #10610
+* [ ] Cherry-pick the above PR to the latest release branch.

 ### Using new Kubernetes dependencies

 This section contains tasks to update Cluster API to use the latest Kubernetes Go dependencies and related topics
@@ -56,19 +83,21 @@ need them in older releases as they are not necessary to manage workload cluster
 run the Cluster API controllers on the new Kubernetes version.

 * [ ] Ensure there is a new controller-runtime minor release which uses the new Kubernetes Go dependencies.
-* [ ] Update our Prow jobs for the `main` branch to use the correct `kubekins-e2e` image
+* [ ] Update our Prow jobs for the `main` branch to use the correct `kubekins-e2e` image via the configuration file and by running `make generate-test-infra-prowjobs`.
   * It is recommended to have one PR for presubmit and one for periodic jobs to reduce the risk of breaking the periodic jobs.
- * Prior art: presubmit jobs: https://github.com/kubernetes/test-infra/pull/27311 - * Prior art: periodic jobs: https://github.com/kubernetes/test-infra/pull/27326 + * Prior art: https://github.com/kubernetes/test-infra/pull/32380 * [ ] Bump the Go version in Cluster API: (if Kubernetes is using a new Go minor version) * Search for the currently used Go version across the repository and update it * We have to at least modify it in: `hack/ensure-go.sh`, `.golangci.yml`, `cloudbuild*.yaml`, `go.mod`, `Makefile`, `netlify.toml`, `Tiltfile` - * Prior art: #7135 + * Prior art: #10452 * [ ] Bump controller-runtime * [ ] Bump controller-tools * [ ] Bump the Kubernetes version used in integration tests via `KUBEBUILDER_ENVTEST_KUBERNETES_VERSION` in `Makefile` * **Note**: This PR should be cherry-picked as well. It is part of this section as it depends on kubebuilder/controller-runtime releases and is not strictly necessary for [Supporting managing and running on the new Kubernetes version](#supporting-managing-and-running-on-the-new-kubernetes-version). + * Prior art to release envtest binaries: https://github.com/kubernetes-sigs/kubebuilder/pull/3864 * Prior art: #7193 * [ ] Bump conversion-gen via `CONVERSION_GEN_VER` in `Makefile` * Prior art: #7118 + +After release of CAPI v1.X.x (that supports Kubernetes v1.Y): diff --git a/.github/ISSUE_TEMPLATE/release_tracking.md b/.github/ISSUE_TEMPLATE/release_tracking.md index 09b145691038..87e0f2f608dd 100644 --- a/.github/ISSUE_TEMPLATE/release_tracking.md +++ b/.github/ISSUE_TEMPLATE/release_tracking.md @@ -14,14 +14,11 @@ Please see the corresponding section in [release-tasks.md](https://github.com/ku **Notes**: * Weeks are only specified to give some orientation. -* The following is based on the v1.4 release cycle. Modify according to the tracked release cycle. - -Week -3 to 1: -* [ ] [Release Lead] [Set a tentative release date for the minor release](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#set-a-tentative-release-date-for-the-minor-release) -* [ ] [Release Lead] [Assemble release team](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#assemble-release-team) +* The following is based on the v1.6 release cycle. Modify according to the tracked release cycle. 
Week 1: * [ ] [Release Lead] [Finalize release schedule and team](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#finalize-release-schedule-and-team) +* [ ] [Release Lead] [Add/remove release team members](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#addremove-release-team-members) * [ ] [Release Lead] [Prepare main branch for development of the new release](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#prepare-main-branch-for-development-of-the-new-release) * [ ] [Communications Manager] [Add docs to collect release notes for users and migration notes for provider implementers](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#add-docs-to-collect-release-notes-for-users-and-migration-notes-for-provider-implementers) * [ ] [Communications Manager] [Update supported versions](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#update-supported-versions) @@ -30,38 +27,42 @@ Week 1 to 4: * [ ] [Release Lead] [Track] [Remove previously deprecated code](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#track-remove-previously-deprecated-code) Week 6: -* [ ] [Release Lead] [Cut the v1.3.1 release](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#repeatedly-cut-a-release) +* [ ] [Release Lead] [Cut the v1.5.1 & v1.4.6 releases](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#repeatedly-cut-a-release) Week 9: -* [ ] [Release Lead] [Cut the v1.3.2 release](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#repeatedly-cut-a-release) +* [ ] [Release Lead] [Cut the v1.5.2 & v1.4.7 releases](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#repeatedly-cut-a-release) Week 11 to 12: * [ ] [Release Lead] [Track] [Bump dependencies](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#track-bump-dependencies) Week 13: -* [ ] [Release Lead] [Cut the v1.4.0-beta.0 release](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#repeatedly-cut-a-release) -* [ ] [Release Lead] [Cut the v1.3.3 release](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#repeatedly-cut-a-release) +* [ ] [Release Lead] [Cut the v1.6.0-beta.0 release](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#repeatedly-cut-a-release) +* [ ] [Release Lead] [Cut the v1.5.3 & v1.4.8 releases](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#repeatedly-cut-a-release) * [ ] [Release Lead] [Create a new GitHub milestone for the next release](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#create-a-new-github-milestone-for-the-next-release) +* [ ] [Communications Manager] [Communicate beta to providers](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#communicate-beta-to-providers) Week 14: -* [ ] [Release Lead] [Cut the v1.4.0-beta.1 release](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#repeatedly-cut-a-release) +* [ ] [Release Lead] [Cut the v1.6.0-beta.1 release](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#repeatedly-cut-a-release) +* [ ] 
[Release Lead] [Set a tentative release date for the next minor release](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#set-a-tentative-release-date-for-the-next-minor-release) +* [ ] [Release Lead] [Assemble next release team](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#assemble-next-release-team) * [ ] [Release Lead] Select release lead for the next release cycle Week 15: -* [ ] [Release Lead] [Create the release-1.4 release branch](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#create-a-release-branch) -* [ ] [Release Lead] [Cut the v1.4.0-rc.0 release](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#repeatedly-cut-a-release) -* [ ] [CI Manager] [Setup jobs and dashboards for the release-1.4 release branch](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#setup-jobs-and-dashboards-for-a-new-release-branch) -* [ ] [Communications Manager] [Ensure the book for the new release is available](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#ensure-the-book-for-the-new-release-is-available) -Week 15 to 17: -* [ ] [Communications Manager] [Polish release notes](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#polish-release-notes) +* KubeCon idle week Week 16: -* [ ] [Release Lead] [Cut the v1.4.0-rc.1 release](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#repeatedly-cut-a-release) +* [ ] [Release Lead] [Cut the v1.6.0-rc.0 release](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#repeatedly-cut-a-release) +* [ ] [Release Lead] [Update milestone applier and GitHub Actions](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#update-milestone-applier-and-github-actions) +* [ ] [CI Manager] [Setup jobs and dashboards for the release-1.6 release branch](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#setup-jobs-and-dashboards-for-a-new-release-branch) +* [ ] [Communications Manager] [Ensure the book for the new release is available](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#ensure-the-book-for-the-new-release-is-available) Week 17: -* [ ] [Release Lead] [Cut the v1.4.0 release](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#repeatedly-cut-a-release) -* [ ] [Release Lead] [Cut the v1.3.4 release](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#repeatedly-cut-a-release) +* [ ] [Release Lead] [Cut the v1.6.0-rc.1 release](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#repeatedly-cut-a-release) + +Week 18: +* [ ] [Release Lead] [Cut the v1.6.0 release](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#repeatedly-cut-a-release) +* [ ] [Release Lead] [Cut the v1.5.4 & v1.4.9 releases](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#repeatedly-cut-a-release) * [ ] [Release Lead] Organize release retrospective * [ ] [Communications Manager] [Change production branch in Netlify to the new release 
branch](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#change-production-branch-in-netlify-to-the-new-release-branch)
 * [ ] [Communications Manager] [Update clusterctl links in the quickstart](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#update-clusterctl-links-in-the-quickstart)
@@ -80,3 +81,7 @@ Continuously:
 If and when necessary:
 * [ ] [Release Lead] [Track] [Bump the Cluster API apiVersion](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#optional-track-bump-the-cluster-api-apiversion)
 * [ ] [Release Lead] [Track] [Bump the Kubernetes version](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#optional-track-bump-the-kubernetes-version)
+* [ ] [Release Lead] [Track Release and Improvement tasks](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-tasks.md#optional-track-release-and-improvement-tasks)
+
+/priority critical-urgent
+/kind feature
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 0985f69db4e3..998e7ae33f82 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,7 +1,18 @@
-
-
+
 **What this PR does / why we need it**:

 **Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*:
 Fixes #
+
+
\ No newline at end of file
diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml
new file mode 100644
index 000000000000..82826f43abc2
--- /dev/null
+++ b/.github/dependabot.yaml
@@ -0,0 +1,56 @@
+# Please see the documentation for all configuration options:
+# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
+version: 2
+updates:
+# GitHub Actions
+- package-ecosystem: "github-actions"
+  directory: "/"
+  schedule:
+    interval: "weekly"
+  groups:
+    all-github-actions:
+      patterns: [ "*" ]
+  commit-message:
+    prefix: ":seedling:"
+  labels:
+  - "area/ci"
+  - "ok-to-test"
+
+# Go modules
+- package-ecosystem: "gomod"
+  directories:
+  - "/"
+  - "/test"
+  - "/hack/tools"
+  schedule:
+    interval: "weekly"
+    day: "monday"
+  ## Group all patch and minor dependency updates into a single PR.
+  groups:
+    all-go-mod-patch-and-minor:
+      patterns: [ "*" ]
+      update-types: [ "patch", "minor" ]
+  ignore:
+  # Ignore controller-runtime as it's upgraded manually.
+  - dependency-name: "sigs.k8s.io/controller-runtime"
+    update-types: [ "version-update:semver-major", "version-update:semver-minor" ]
+  # Ignore k8s and its transitive modules as they are upgraded manually together with controller-runtime.
+  - dependency-name: "k8s.io/*"
+    update-types: [ "version-update:semver-major", "version-update:semver-minor" ]
+  - dependency-name: "github.com/prometheus/*"
+    update-types: [ "version-update:semver-major", "version-update:semver-minor"]
+  - dependency-name: "go.etcd.io/*"
+    update-types: [ "version-update:semver-major", "version-update:semver-minor" ]
+  - dependency-name: "google.golang.org/grpc"
+    update-types: [ "version-update:semver-major", "version-update:semver-minor" ]
+  # Ignore kind as it's upgraded manually.
+  - dependency-name: "sigs.k8s.io/kind"
+    update-types: [ "version-update:semver-major", "version-update:semver-minor" ]
+  # Bumping the kustomize API independently can break compatibility with client-go as they share k8s.io/kube-openapi as a dependency.
+  - dependency-name: "sigs.k8s.io/kustomize/api"
+    update-types: [ "version-update:semver-major", "version-update:semver-minor" ]
+  commit-message:
+    prefix: ":seedling:"
+  labels:
+  - "area/dependency"
+  - "ok-to-test"
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
deleted file mode 100644
index 83e155480f2c..000000000000
--- a/.github/dependabot.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-# Please see the documentation for all configuration options:
-# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
-version: 2
-updates:
-# GitHub Actions
-- package-ecosystem: "github-actions"
-  directory: "/"
-  schedule:
-    interval: "weekly"
-  commit-message:
-    prefix: ":seedling:"
-  labels:
-  - "ok-to-test"
-# Go
-- package-ecosystem: "gomod"
-  directory: "/"
-  schedule:
-    interval: "weekly"
-  ignore:
-    # Ignore controller-runtime as its upgraded manually.
-    - dependency-name: "sigs.k8s.io/controller-runtime"
-    # Ignore k8s and its transitives modules as they are upgraded manually
-    # together with controller-runtime.
-    - dependency-name: "k8s.io/*"
-    - dependency-name: "go.etcd.io/*"
-    - dependency-name: "google.golang.org/grpc"
-  commit-message:
-    prefix: ":seedling:"
-  labels:
-  - "ok-to-test"
diff --git a/.github/workflows/dependabot.yml b/.github/workflows/pr-dependabot.yaml
similarity index 68%
rename from .github/workflows/dependabot.yml
rename to .github/workflows/pr-dependabot.yaml
index 45f2ff49ea7a..7553a8683eb3 100644
--- a/.github/workflows/dependabot.yml
+++ b/.github/workflows/pr-dependabot.yaml
@@ -1,5 +1,6 @@
-name: dependabot
+name: PR dependabot code generation and go modules fix
+# This action runs on PRs opened by dependabot and updates go modules and generated code on them.
on: pull_request: branches: @@ -18,15 +19,15 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out code into the Go module directory - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3.3.0 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # tag=v4.1.6 - name: Calculate go version id: vars run: echo "go_version=$(make go-version)" >> $GITHUB_OUTPUT - name: Set up Go - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # tag=v3.5.0 + uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # tag=v5.0.1 with: go-version: ${{ steps.vars.outputs.go_version }} - - uses: actions/cache@69d9d449aced6a2ede0bc19182fadc3a0a42d2b0 # tag=v3.2.6 + - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # tag=v4.0.2 name: Restore go cache with: path: | @@ -39,7 +40,7 @@ jobs: run: make generate-modules - name: Update generated code run: make generate - - uses: EndBug/add-and-commit@61a88be553afe4206585b31aa72387c64295d08b # tag=v9.1.1 + - uses: EndBug/add-and-commit@a94899bca583c204427a224a7af87c02f9b325d5 # tag=v9.1.4 name: Commit changes with: author_name: dependabot[bot] diff --git a/.github/workflows/pr-gh-workflow-approve.yaml b/.github/workflows/pr-gh-workflow-approve.yaml new file mode 100644 index 000000000000..f493fd40032d --- /dev/null +++ b/.github/workflows/pr-gh-workflow-approve.yaml @@ -0,0 +1,42 @@ +name: PR approve GH Workflows + +on: + pull_request_target: + types: + - edited + - labeled + - reopened + - synchronize + +permissions: {} + +jobs: + approve: + name: Approve ok-to-test + if: contains(github.event.pull_request.labels.*.name, 'ok-to-test') + runs-on: ubuntu-latest + permissions: + actions: write + steps: + - name: Update PR + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + continue-on-error: true + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const result = await github.rest.actions.listWorkflowRunsForRepo({ + owner: context.repo.owner, + repo: context.repo.repo, + event: "pull_request", + status: "action_required", + head_sha: context.payload.pull_request.head.sha, + per_page: 100 + }); + + for (var run of result.data.workflow_runs) { + await github.rest.actions.approveWorkflowRun({ + owner: context.repo.owner, + repo: context.repo.repo, + run_id: run.id + }); + } diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/pr-golangci-lint.yaml similarity index 62% rename from .github/workflows/golangci-lint.yml rename to .github/workflows/pr-golangci-lint.yaml index 267d0f28f37a..97e5f2df73f0 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/pr-golangci-lint.yaml @@ -1,4 +1,4 @@ -name: golangci-lint +name: PR golangci-lint on: pull_request: @@ -12,22 +12,24 @@ jobs: name: lint runs-on: ubuntu-latest strategy: + fail-fast: false matrix: working-directory: - "" - test - hack/tools steps: - - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3.3.0 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # tag=v4.1.6 - name: Calculate go version id: vars run: echo "go_version=$(make go-version)" >> $GITHUB_OUTPUT - name: Set up Go - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # tag=v3.5.0 + uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # tag=v5.0.1 with: go-version: ${{ steps.vars.outputs.go_version }} - name: golangci-lint - uses: golangci/golangci-lint-action@08e2f20817b15149a52b5b3ebe7de50aff2ba8c5 # tag=v3.4.0 + uses: 
golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # tag=v6.0.1
        with:
-         version: v1.51.1
+         version: v1.57.2
+         args: --out-format=colored-line-number
          working-directory: ${{matrix.working-directory}}
diff --git a/.github/workflows/lint-docs-pr.yaml b/.github/workflows/pr-md-link-check.yaml
similarity index 82%
rename from .github/workflows/lint-docs-pr.yaml
rename to .github/workflows/pr-md-link-check.yaml
index c6038fd72a97..9edd765764f7 100644
--- a/.github/workflows/lint-docs-pr.yaml
+++ b/.github/workflows/pr-md-link-check.yaml
@@ -1,4 +1,4 @@
-name: Check PR Markdown links
+name: PR check Markdown links

 on:
   pull_request:
@@ -14,7 +14,7 @@ jobs:
     name: Broken Links
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3.3.0
+    - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # tag=v4.1.6
     - uses: gaurav-nelson/github-action-markdown-link-check@5c5dfc0ac2e225883c0e5f03a85311ec2830d368 # tag=v1
       with:
         use-quiet-mode: 'yes'
diff --git a/.github/workflows/verify.yml b/.github/workflows/pr-verify.yaml
similarity index 73%
rename from .github/workflows/verify.yml
rename to .github/workflows/pr-verify.yaml
index 40a09e150a05..464888e6a5d2 100644
--- a/.github/workflows/verify.yml
+++ b/.github/workflows/pr-verify.yaml
@@ -1,4 +1,4 @@
-name: verify
+name: PR verify

 on:
   pull_request_target:
@@ -14,6 +14,6 @@ jobs:
     steps:
       - name: Verifier action
         id: verifier
-        uses: kubernetes-sigs/kubebuilder-release-tools@4f3d1085b4458a49ed86918b4b55505716715b77 # tag=v0.3.0
+        uses: kubernetes-sigs/kubebuilder-release-tools@012269a88fa4c034a0acf1ba84c26b195c0dbab4 # tag=v0.4.3
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
new file mode 100644
index 000000000000..92ca75fcafbf
--- /dev/null
+++ b/.github/workflows/release.yaml
@@ -0,0 +1,113 @@
+name: Create Release
+
+on:
+  push:
+    branches:
+    - main
+    paths:
+    - 'CHANGELOG/*.md'
+
+permissions:
+  contents: write # Allow pushing a tag, creating a release branch, and publishing a draft release.
+ +jobs: + push_release_tags: + runs-on: ubuntu-latest + outputs: + release_tag: ${{ steps.release-version.outputs.release_version }} + steps: + - name: Checkout code + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # tag=v4.1.6 + with: + fetch-depth: 0 + - name: Get changed files + id: changed-files + uses: tj-actions/changed-files@a29e8b565651ce417abb5db7164b4a2ad8b6155c # tag=v44.4.0 + - name: Get release version + id: release-version + run: | + if [[ ${{ steps.changed-files.outputs.all_changed_files_count }} != 1 ]]; then + echo "1 release notes file should be changed to create a release tag, found ${{ steps.changed-files.outputs.all_changed_files_count }}" + exit 1 + fi + for changed_file in ${{ steps.changed-files.outputs.all_changed_files }}; do + export RELEASE_VERSION=$(echo "${changed_file}" | grep -oP '(?<=/)[^/]+(?=\.md)') + echo "RELEASE_VERSION=$RELEASE_VERSION" >> $GITHUB_ENV + echo "RELEASE_VERSION=$RELEASE_VERSION" >> $GITHUB_OUTPUT + if [[ "$RELEASE_VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+(-[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$ ]]; then + echo "Valid semver: $RELEASE_VERSION" + else + echo "Invalid semver: $RELEASE_VERSION" + exit 1 + fi + done + - name: Determine the release branch to use + run: | + if [[ $RELEASE_VERSION =~ beta ]] || [[ $RELEASE_VERSION =~ alpha ]]; then + export RELEASE_BRANCH=main + echo "RELEASE_BRANCH=$RELEASE_BRANCH" >> $GITHUB_ENV + echo "This is a beta or alpha release, will use release branch $RELEASE_BRANCH" + else + export RELEASE_BRANCH=release-$(echo $RELEASE_VERSION | sed -E 's/^v([0-9]+)\.([0-9]+)\..*$/\1.\2/') + echo "RELEASE_BRANCH=$RELEASE_BRANCH" >> $GITHUB_ENV + echo "This is not a beta or alpha release, will use release branch $RELEASE_BRANCH" + fi + - name: Create or checkout release branch + run: | + if git show-ref --verify --quiet "refs/remotes/origin/$RELEASE_BRANCH"; then + echo "Branch $RELEASE_BRANCH already exists" + git checkout "$RELEASE_BRANCH" + else + git checkout -b "$RELEASE_BRANCH" + git push origin "$RELEASE_BRANCH" + echo "Created branch $RELEASE_BRANCH" + fi + - name: Validate tag does not already exist + run: | + if [[ $(git tag -l $RELEASE_VERSION) ]]; then + echo "Tag $RELEASE_VERSION already exists, exiting" + exit 1 + fi + - name: Create Release Tag + run: | + git config user.name "${GITHUB_ACTOR}" + git config user.email "${GITHUB_ACTOR}@users.noreply.github.com" + git tag -a ${RELEASE_VERSION} -m ${RELEASE_VERSION} + git tag test/${RELEASE_VERSION} + git push origin ${RELEASE_VERSION} + git push origin test/${RELEASE_VERSION} + echo "Created tags $RELEASE_VERSION and test/${RELEASE_VERSION}" + release: + name: create draft release + runs-on: ubuntu-latest + needs: push_release_tags + steps: + - name: Set env + run: echo "RELEASE_TAG=${RELEASE_TAG}" >> $GITHUB_ENV + env: + RELEASE_TAG: ${{needs.push_release_tags.outputs.release_tag}} + - name: checkout code + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # tag=v4.1.6 + with: + fetch-depth: 0 + ref: ${{ env.RELEASE_TAG }} + - name: Calculate go version + run: echo "go_version=$(make go-version)" >> $GITHUB_ENV + - name: Set up Go + uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # tag=v5.0.1 + with: + go-version: ${{ env.go_version }} + - name: generate release artifacts + run: | + make release + - name: get release notes + run: | + curl -L "https://raw.githubusercontent.com/${{ github.repository }}/main/CHANGELOG/${{ env.RELEASE_TAG }}.md" \ + -o "${{ env.RELEASE_TAG }}.md" + - 
name: Release + uses: softprops/action-gh-release@69320dbe05506a9a39fc8ae11030b214ec2d1f87 # tag=v2.0.5 + with: + draft: true + files: out/* + body_path: ${{ env.RELEASE_TAG }}.md + tag_name: ${{ env.RELEASE_TAG }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml deleted file mode 100644 index 88469b64a46d..000000000000 --- a/.github/workflows/release.yml +++ /dev/null @@ -1,38 +0,0 @@ -name: release - -on: - push: - # Sequence of patterns matched against refs/tags - tags: - - 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10 - -permissions: - contents: write # Allow to create a release. - -jobs: - build: - name: create draft release - runs-on: ubuntu-latest - steps: - - name: Set env - run: echo "RELEASE_TAG=${GITHUB_REF:10}" >> $GITHUB_ENV - - name: checkout code - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3.3.0 - with: - fetch-depth: 0 - - name: Calculate go version - id: vars - run: echo "go_version=$(make go-version)" >> $GITHUB_OUTPUT - - name: Set up Go - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # tag=v3.5.0 - with: - go-version: ${{ steps.vars.outputs.go_version }} - - name: generate release artifacts - run: | - make release - - name: Release - uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # tag=v1 - with: - draft: true - files: out/* - body: "TODO: Copy release notes shared by the comms team" diff --git a/.github/workflows/lint-docs-weekly.yml b/.github/workflows/weekly-md-link-check.yaml similarity index 72% rename from .github/workflows/lint-docs-weekly.yml rename to .github/workflows/weekly-md-link-check.yaml index 0f2b9e5f9c5e..173d2bab6383 100644 --- a/.github/workflows/lint-docs-weekly.yml +++ b/.github/workflows/weekly-md-link-check.yaml @@ -2,7 +2,8 @@ name: Weekly check all Markdown links on: schedule: - - cron: "0 12 * * 4" + # Cron for every Monday at 12:00 UTC. + - cron: "0 12 * * 1" # Remove all permissions from GITHUB_TOKEN except metadata. permissions: {} @@ -13,10 +14,10 @@ jobs: strategy: fail-fast: false matrix: - branch: [ main, release-1.3, release-1.2 ] + branch: [ main, release-1.7, release-1.6 ] runs-on: ubuntu-latest steps: - - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3.3.0 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # tag=v4.1.6 with: ref: ${{ matrix.branch }} - uses: gaurav-nelson/github-action-markdown-link-check@5c5dfc0ac2e225883c0e5f03a85311ec2830d368 # tag=v1 diff --git a/.github/workflows/scan.yml b/.github/workflows/weekly-security-scan.yaml similarity index 56% rename from .github/workflows/scan.yml rename to .github/workflows/weekly-security-scan.yaml index 696da22b1d33..07c182c7c82c 100644 --- a/.github/workflows/scan.yml +++ b/.github/workflows/weekly-security-scan.yaml @@ -1,7 +1,8 @@ -name: scan-images +name: Weekly security scan on: schedule: + # Cron for every Monday at 12:00 UTC. - cron: "0 12 * * 1" # Remove all permissions from GITHUB_TOKEN except metadata. 
@@ -10,21 +11,22 @@ permissions: {} jobs: scan: strategy: + fail-fast: false matrix: - branch: [ main, release-1.3, release-1.2 ] + branch: [ main, release-1.7, release-1.6 ] name: Trivy runs-on: ubuntu-latest steps: - name: Check out code - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # tag=v3.3.0 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # tag=v4.1.6 with: ref: ${{ matrix.branch }} - name: Calculate go version id: vars run: echo "go_version=$(make go-version)" >> $GITHUB_OUTPUT - name: Set up Go - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # tag=v3.5.0 + uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # tag=v5.0.1 with: go-version: ${{ steps.vars.outputs.go_version }} - - name: Run verify container script - run: make verify-container-images + - name: Run verify security target + run: make verify-security diff --git a/.github/workflows/weekly-test-release.yaml b/.github/workflows/weekly-test-release.yaml new file mode 100644 index 000000000000..64efc4164df1 --- /dev/null +++ b/.github/workflows/weekly-test-release.yaml @@ -0,0 +1,40 @@ +name: Weekly release test + +# Note: This workflow does not build for releases. It attempts to build release binaries periodically to ensure the repo +# release machinery is in a good state. + +on: + schedule: + # Cron for every day at 12:00 UTC. + - cron: "0 12 * * *" + +# Remove all permissions from GITHUB_TOKEN except metadata. +permissions: {} + +jobs: + weekly-test-release: + name: Test release + strategy: + fail-fast: false + matrix: + branch: [ main, release-1.7, release-1.6 ] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # tag=v4.1.6 + with: + ref: ${{ matrix.branch }} + fetch-depth: 0 + - name: Set env + run: echo "RELEASE_TAG=v9.9.9-fake" >> $GITHUB_ENV + - name: Set fake tag for release + run: | + git tag ${{ env.RELEASE_TAG }} + - name: Calculate go version + run: echo "go_version=$(make go-version)" >> $GITHUB_ENV + - name: Set up Go + uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # tag=v5.0.1 + with: + go-version: ${{ env.go_version }} + - name: Test release + run: | + make release \ No newline at end of file diff --git a/.gitignore b/.gitignore index 6ea28f085240..03cd627777aa 100644 --- a/.gitignore +++ b/.gitignore @@ -12,9 +12,7 @@ hack/tools/bin # E2E test templates test/e2e/data/infrastructure-docker/**/cluster-template*.yaml - -# E2e test extension deployment -test/e2e/data/test-extension/deployment.yaml +test/e2e/data/infrastructure-inmemory/**/cluster-template*.yaml # Output of Makefile targets using sed on MacOS systems *.yaml-e @@ -30,6 +28,10 @@ test/e2e/data/test-extension/deployment.yaml .vscode/ *.code-workspace +# go.work files +go.work +go.work.sum + # kubeconfigs minikube.kubeconfig @@ -74,7 +76,6 @@ _artifacts # release artifacts out -_releasenotes # Helm .helm @@ -85,3 +86,6 @@ tmp # asdf (not a typo! ;) used to manage multiple versions of tools .tool-versions + +# Development container configurations (https://containers.dev/) +.devcontainer diff --git a/.golangci.yml b/.golangci.yml index 799403e5cb81..812acedab788 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,12 +1,14 @@ run: timeout: 10m - go: "1.19" + go: "1.22" build-tags: - tools - e2e skip-files: - "zz_generated.*\\.go$" - "vendored_openapi\\.go$" + # We don't want to invest time to fix new linter findings in old API types. 
+ - "internal/apis/.*" allow-parallel-runners: true linters: @@ -17,7 +19,6 @@ linters: - bidichk - bodyclose - containedctx - - depguard - dogsled - dupword - durationcheck @@ -37,6 +38,7 @@ linters: - govet - importas - ineffassign + - loggercheck - misspell - nakedret - nilerr @@ -59,14 +61,13 @@ linters: linters-settings: gci: - local-prefixes: "sigs.k8s.io/cluster-api" + sections: + - standard # Standard section: captures all standard packages. + - default # Default section: contains all imports that could not be matched to another section type. + - prefix(sigs.k8s.io/cluster-api) # Custom section: groups all imports with the specified Prefix. + custom-order: true ginkgolinter: - # Suppress the wrong length assertion warning. - suppress-len-assertion: false - # Suppress the wrong nil assertion warning. - suppress-nil-assertion: false - # Suppress the wrong error assertion warning. - suppress-err-assertion: true + forbid-focus-container: true godot: # declarations - for top level declaration comments (default); # toplevel - for top level comments; @@ -96,10 +97,6 @@ linters-settings: - wrapperFunc - rangeValCopy - hugeParam - gosec: - excludes: - - G307 # Deferring unsafe method "Close" on type "\*os.File" - - G108 # Profiling endpoint is automatically exposed on /debug/pprof importas: no-unaliased: true alias: @@ -120,40 +117,43 @@ linters-settings: - pkg: sigs.k8s.io/controller-runtime alias: ctrl # CABPK - - pkg: sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha3 + - pkg: sigs.k8s.io/cluster-api/internal/apis/bootstrap/kubeadm/v1alpha3 alias: bootstrapv1alpha3 - - pkg: sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4 + - pkg: sigs.k8s.io/cluster-api/internal/apis/bootstrap/kubeadm/v1alpha4 alias: bootstrapv1alpha4 - pkg: sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1 alias: bootstrapv1 # KCP - - pkg: sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3 + - pkg: sigs.k8s.io/cluster-api/internal/apis/controlplane/kubeadm/v1alpha3 alias: controlplanev1alpha3 - - pkg: sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha4 + - pkg: sigs.k8s.io/cluster-api/internal/apis/controlplane/kubeadm/v1alpha4 alias: controlplanev1alpha4 - pkg: sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1 alias: controlplanev1 # CAPI - - pkg: sigs.k8s.io/cluster-api/api/v1alpha3 + - pkg: sigs.k8s.io/cluster-api/internal/apis/core/v1alpha3 alias: clusterv1alpha3 - - pkg: sigs.k8s.io/cluster-api/api/v1alpha4 + - pkg: sigs.k8s.io/cluster-api/internal/apis/core/v1alpha4 alias: clusterv1alpha4 - pkg: sigs.k8s.io/cluster-api/api/v1beta1 alias: clusterv1 # CAPI exp - - pkg: sigs.k8s.io/cluster-api/exp/api/v1alpha3 + - pkg: sigs.k8s.io/cluster-api/internal/apis/core/exp/v1alpha3 alias: expv1alpha3 - - pkg: sigs.k8s.io/cluster-api/exp/api/v1alpha4 + - pkg: sigs.k8s.io/cluster-api/internal/apis/core/exp/v1alpha4 alias: expv1alpha4 - pkg: sigs.k8s.io/cluster-api/exp/api/v1beta1 alias: expv1 # CAPI exp addons - - pkg: sigs.k8s.io/cluster-api/exp/addons/api/v1alpha3 + - pkg: sigs.k8s.io/cluster-api/internal/apis/core/exp/addons/v1alpha3 alias: addonsv1alpha3 - - pkg: sigs.k8s.io/cluster-api/exp/addons/api/v1alpha4 + - pkg: sigs.k8s.io/cluster-api/internal/apis/core/exp/addons/v1alpha4 alias: addonsv1alpha4 - pkg: sigs.k8s.io/cluster-api/exp/addons/api/v1beta1 alias: addonsv1 + # CAPI exp IPAM + - pkg: sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1 + alias: ipamv1 # CAPI exp runtime - pkg: sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1 alias: runtimev1 @@ -219,7 +219,8 @@ linters-settings: # - 
name: bool-literal-in-expr - name: constant-logical-expr - + goconst: + ignore-tests: true issues: max-same-issues: 0 max-issues-per-linter: 0 @@ -231,7 +232,7 @@ issues: # should be removed as the referenced deprecated item is removed from the project. - linters: - staticcheck - text: "SA1019: (bootstrapv1.ClusterStatus|scope.Config.Spec.UseExperimentalRetryJoin|DockerMachine.Spec.Bootstrapped|machineStatus.Bootstrapped) is deprecated" + text: "SA1019: (bootstrapv1.ClusterStatus|KubeadmConfigSpec.UseExperimentalRetryJoin|scope.Config.Spec.UseExperimentalRetryJoin|DockerMachine.Spec.Bootstrapped|machineStatus.Bootstrapped|c.TopologyPlan) is deprecated" # Specific exclude rules for deprecated packages that are still part of the codebase. These # should be removed as the referenced deprecated packages are removed from the project. - linters: @@ -272,14 +273,21 @@ issues: text: always receives # Dot imports for gomega and ginkgo are allowed # within test files and test utils. - - path: _test\.go + - linters: + - revive + - stylecheck + path: _test\.go text: should not use dot imports - - path: (framework|e2e)/.*.go + - linters: + - revive + - stylecheck + path: (framework|e2e)/.*.go text: should not use dot imports - - path: util/defaulting/defaulting.go + - linters: + - revive + - stylecheck + path: util/defaulting/defaulting.go text: should not use dot imports - - path: _test\.go - text: cyclomatic complexity # Append should be able to assign to a different var/slice. - linters: - gocritic @@ -321,11 +329,6 @@ issues: - stylecheck text: "ST1016: methods on the same type should have the same receiver name" path: .*(api|types)\/.*\/conversion.*\.go$ - # hack/tools - - linters: - - typecheck - text: import (".+") is a program, not an importable package - path: ^tools\.go$ # We don't care about defer in for loops in test files. - linters: - gocritic diff --git a/.markdownlinkcheck.json b/.markdownlinkcheck.json index 78da0b7b6e15..3ee32466702c 100644 --- a/.markdownlinkcheck.json +++ b/.markdownlinkcheck.json @@ -1,6 +1,8 @@ { "ignorePatterns": [{ "pattern": "^http://localhost" + },{ + "pattern": "https://azure.microsoft.com/en-us/products/kubernetes-service" }], "httpHeaders": [{ "comment": "Workaround as suggested here: https://github.com/tcort/markdown-link-check/issues/201", diff --git a/CHANGELOG/OWNERS b/CHANGELOG/OWNERS new file mode 100644 index 000000000000..774819574f69 --- /dev/null +++ b/CHANGELOG/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - cluster-api-release-lead + +reviewers: + - cluster-api-release-team diff --git a/CHANGELOG/README.md b/CHANGELOG/README.md new file mode 100644 index 000000000000..0895b1f9815e --- /dev/null +++ b/CHANGELOG/README.md @@ -0,0 +1,5 @@ +# CHANGELOG + +This folder contains release notes for past releases. Changes to this folder in the main branch trigger a GitHub Action that creates release tags and a draft release. + +See [release documentation](../docs/release/release-tasks.md) for more information. 
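To make the README's trigger description concrete: the new `release.yaml` workflow above derives the tag and target branch from the changed changelog filename. Below is a minimal shell sketch of that derivation, using a hypothetical changed file `CHANGELOG/v1.7.0.md` (in the workflow the filename comes from `tj-actions/changed-files`):

```sh
#!/usr/bin/env bash
set -euo pipefail

# Hypothetical changed file; the workflow gets this from tj-actions/changed-files.
changed_file="CHANGELOG/v1.7.0.md"

# Derive the release version from the filename, as release.yaml does.
RELEASE_VERSION=$(echo "${changed_file}" | grep -oP '(?<=/)[^/]+(?=\.md)')

# Validate it against the same semver regex used by the workflow.
if [[ "$RELEASE_VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+(-[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$ ]]; then
  echo "Valid semver: $RELEASE_VERSION"
else
  echo "Invalid semver: $RELEASE_VERSION" >&2
  exit 1
fi

# Alpha/beta pre-releases are tagged from main; everything else from release-X.Y.
if [[ "$RELEASE_VERSION" =~ beta ]] || [[ "$RELEASE_VERSION" =~ alpha ]]; then
  RELEASE_BRANCH=main
else
  RELEASE_BRANCH=release-$(echo "$RELEASE_VERSION" | sed -E 's/^v([0-9]+)\.([0-9]+)\..*$/\1.\2/')
fi
echo "Tag $RELEASE_VERSION would be cut from branch $RELEASE_BRANCH"
```

With `CHANGELOG/v1.7.0-beta.0.md` the same logic resolves the branch to `main`, which is how the workflow routes pre-releases.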
diff --git a/CHANGELOG/v1.4.6.md b/CHANGELOG/v1.4.6.md new file mode 100644 index 000000000000..1ba105ce545d --- /dev/null +++ b/CHANGELOG/v1.4.6.md @@ -0,0 +1,37 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.23.x -> v1.27.x +- Workload Cluster: v1.21.x -> v1.27.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.4.5 +--- +## :chart_with_upwards_trend: Overview +- 20 new commits merged +- 9 bugs fixed 🐛 + +## :bug: Bug Fixes +- CAPBK: Certificate paths in cloud-init scripts should not use a platform-dependent path separator (#9231) +- CAPD: Delete container after failed start to work around port allocation issues (#9131) +- ClusterClass: Add topology-owned label to MachineHealthChecks. (#9203) +- ClusterClass: Fix ClusterClass enqueue for ExtensionConfig (#9138) +- Devtools: Fix tilt-prepare leader-elect setting (#9317) +- KCP: Allow unsetting useExperimentalRetryJoin field from KubeadmControlPlane (#9201) +- MachineDeployment: MD controller: use regular random suffix for MachineSets, ensure max length 63 (#9330) +- util: Fix AddAnnotations for unstructured.Unstructured (#9178) + +## :seedling: Others +- CAPD: Fix multi error handling in RunContainer (#9242) +- CI: Add verify-govulncheck and verify-vulnerabilities targets and integrate to scan action (#9180) +- CI: Update base branch for link checker (#9206) +- Dependency: Bump corefile-migration library to v1.0.21 (#9310) +- Dependency: Bump golang.org/x/net to v0.13.0 (#9123) +- Dependency: Bump to Go 1.19.12 (#9106) +- Dependency: Update cert-manager to v1.12.3 (#9199) +- logging: Fix patch errors not being logged (#9235) + +:book: Additionally, there have been 4 contributions to our documentation and book. (#9333, #9245, #9155, #9118) + + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.4.7.md b/CHANGELOG/v1.4.7.md new file mode 100644 index 000000000000..c3178ee9c7a1 --- /dev/null +++ b/CHANGELOG/v1.4.7.md @@ -0,0 +1,26 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.23.x -> v1.27.x +- Workload Cluster: v1.21.x -> v1.27.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.4.6 +--- +## :chart_with_upwards_trend: Overview +- 10 new commits merged +- 2 bugs fixed 🐛 + +## :bug: Bug Fixes +- KCP: Fix KCP Controller reconcile always return error when workload cluster is unreachable (#9450) +- MachineHealthCheck: Fix excessive trace logging in the machine health check controller (#9427) + +## :seedling: Others +- Dependency: Bump cert-manager to v1.13.0 (#9414) +- Dependency: Bump to Go 1.20.8 (#9432) +- KCP: Remove redundant GetRESTConfig in KCP Management.GetWorkloadCluster (#9453) + +:book: Additionally, there have been 3 contributions to our documentation and book. 
(#9366, #9431, #9492) + + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.4.8.md b/CHANGELOG/v1.4.8.md new file mode 100644 index 000000000000..03e35c5c3dd9 --- /dev/null +++ b/CHANGELOG/v1.4.8.md @@ -0,0 +1,26 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.23.x -> v1.27.x +- Workload Cluster: v1.21.x -> v1.27.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.4.7 +## :chart_with_upwards_trend: Overview +- 7 new commits merged +- 2 bugs fixed 🐛 + +## :bug: Bug Fixes +- Machine: Retry Node delete when CCT is locked (#9583) +- Machine: Revert ErrClusterLocked check in Machine controller (#9588) + +## :seedling: Others +- Dependency: Bump cert-manager to v1.13.1 (#9508) +- Dependency: Bump Go to v1.20.10 (#9553) +- Dependency: Bump golang.org/x/net to v0.17.0 (#9594) +- Release: Improve release staging build speed (#9554) + +:book: Additionally, there has been 1 contribution to our documentation and book. (#9597) + + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.4.9.md b/CHANGELOG/v1.4.9.md new file mode 100644 index 000000000000..3390b4de9bed --- /dev/null +++ b/CHANGELOG/v1.4.9.md @@ -0,0 +1,33 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.23.x -> v1.27.x +- Workload Cluster: v1.21.x -> v1.27.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.4.8 +## :chart_with_upwards_trend: Overview +- 15 new commits merged +- 5 bugs fixed 🐛 + +## :bug: Bug Fixes +- CAPD: Fix DockerMachine panic (#9690) +- clusterctl: Fix provider namespace secret not included in clusterctl move (#9749) +- ClusterResourceSet: Requeue after 1 minute if ErrClusterLocked got hit (#9809) +- KCP: Allow dropping patches KubeadmControlPlane KubeadmConfig (#9700) +- Runtime SDK: Set User Agent for test extension correctly (#9751) + +## :seedling: Others +- CAPD: Set Condition, if creating external LB failed. 
(#9712) +- Dependency: Bump cert-manager to v1.13.2 (#9657) +- Dependency: Bump controller runtime to v0.14.7 (#9625) +- Dependency: Bump github.com/docker/docker from 24.0.5 to 24.0.7 (#9674) +- Dependency: Bump Go version to v1.20.11 (#9687) +- Dependency: Bump golang.org/grpc to v1.59.0 (#9628) +- Dependency: Fix delve version in Tiltfile (#9701) +- e2e: Improve output of exec.KubectlApply (#9766) +- KCP: Support admin config for Kubeadm v1.29 (#9685) +- Release: Revert "Improve release staging build speed" (#9763) + + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.5.1.md b/CHANGELOG/v1.5.1.md new file mode 100644 index 000000000000..9813a5c16110 --- /dev/null +++ b/CHANGELOG/v1.5.1.md @@ -0,0 +1,51 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.24.x -> v1.28.x +- Workload Cluster: v1.22.x -> v1.28.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.5.0 +--- +## :chart_with_upwards_trend: Overview +- 35 new commits merged +- 2 feature additions ✨ +- 9 bugs fixed 🐛 + +## :sparkles: New Features +- MachineSet: Adjust preflight check to allow kubelet version skew of 3 for clusters running v1.28 and above (#9233) +- Testing/documentation: v1.28: Prepare quickstart, capd and tests for the new release including kind bump (#9225) + +## :bug: Bug Fixes +- CAPBK: Certificate paths in cloud-init scripts should not use a platform-dependent path separator (#9230) +- CAPD: Delete container after failed start to work around port allocation issues (#9130) +- ClusterClass: Add topology-owned label to MachineHealthChecks. (#9204) +- ClusterClass: Fix ClusterClass enqueue for ExtensionConfig (#9137) +- Devtools: Fix tilt-prepare leader-elect setting (#9316) +- e2e: Pin conformance image to a version which includes a fix for the dualstack tests (#9256) +- KCP: Allow unsetting useExperimentalRetryJoin field from KubeadmControlPlane (#9202) +- MachineDeployment: MD controller: use regular random suffix for MachineSets, ensure max length 63 (#9329) +- util: Fix AddAnnotations for unstructured.Unstructured (#9176) + +## :seedling: Others +- CAPD: Fix multi error handling in RunContainer (#9243) +- CI: Add verify-govulncheck and verify-vulnerabilities targets and integrate to scan action (#9179) +- CI: Update base branch for link checker (#9207) +- clusterctl: Set controller-runtime logger for clusterctl (#9129) +- clusterctl: Update cert-manager to v1.12.3 (#9198) +- Dependency: Bump controller-runtime to v0.15.1 (#9127) +- Dependency: Bump corefile-migration library to v1.0.21 (#9309) +- Dependency: Bump envtest binaries to 1.28 (#9304) +- Dependency: Bump golang.org/x/net to v0.13.0 (#9122) +- Dependency: Bump to Go 1.20.7 (#9105) +- e2e: Add back flavor to Waiter interface (#9177) +- e2e: Add CRS re-reconcile to ownerReference test (#9301) +- e2e: Add test for ownerReference apiVersion update (#9297) +- e2e: Refactor e2e ownerRef test utils (#9314) +- e2e: Test and document controller ownerReferences (#9303) +- logging: Fix patch errors not being logged (#9234) + +:book: Additionally, there have been 8 contributions to our documentation and book.
(#9332, #9226, #9205, #9238, #9154, #9117, #9090, #9082) + + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.5.2.md b/CHANGELOG/v1.5.2.md new file mode 100644 index 000000000000..ec769d669924 --- /dev/null +++ b/CHANGELOG/v1.5.2.md @@ -0,0 +1,36 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.24.x -> v1.28.x +- Workload Cluster: v1.22.x -> v1.28.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.5.1 +--- +## :chart_with_upwards_trend: Overview +- 19 new commits merged +- 1 feature addition ✨ +- 4 bugs fixed 🐛 + +## :sparkles: New Features +- ClusterClass: Introduce NamingStrategy and allow generating names using go templates (#9428) + +## :bug: Bug Fixes +- e2e: Fix autoscaler image repo (#9357) +- KCP: Fix KCP Controller reconcile always return error when workload cluster is unreachable (#9449) +- MachineHealthCheck: Fix excessive trace logging in the machine health check controller (#9426) +- Testing: Don't use v1alpha3 in clusterctl upgrade test (#9412) + +## :seedling: Others +- ClusterClass: Improve Cluster variable defaulting/validation errors (#9479) +- ClusterClass: Improve message for TopologyReconciledCondition (#9401) +- Dependency: Bump cert-manager to v1.13.0 (#9413) +- Dependency: Bump to Go 1.20.8 (#9383) +- e2e: Bump autoscaler to v1.28.0 (#9351) +- KCP: Remove redundant GetRESTConfig in KCP Management.GetWorkloadCluster (#9451) +- Runtime SDK: Improve ClusterClass watch for ExtensionConfigs (#9356) + +:book: Additionally, there have been 6 contributions to our documentation and book. (#9365, #9430, #9435, #9466, #9470, #9491) + + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.5.3.md b/CHANGELOG/v1.5.3.md new file mode 100644 index 000000000000..d0bca89e2b36 --- /dev/null +++ b/CHANGELOG/v1.5.3.md @@ -0,0 +1,28 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.24.x -> v1.28.x +- Workload Cluster: v1.22.x -> v1.28.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.5.2 +## :chart_with_upwards_trend: Overview +- 10 new commits merged +- 2 bugs fixed 🐛 + +## :bug: Bug Fixes +- e2e: Fix broken e2e test clusterclass (#9504) +- Machine: Retry Node delete when CCT is locked (#9582) + +## :seedling: Others +- Dependency: Bump cert-manager to v1.13.1 (#9507) +- Dependency: Bump Go to v1.20.10 (#9552) +- Dependency: Bump go.opentelemetry.io/* dependencies (#9599) +- Dependency: Bump golang.org/x/net to v0.17.0 (#9595) +- e2e: Add log level for kube components patch to ClusterClass (#9501) +- Release: Improve release staging build speed (#9555) + +:book: Additionally, there have been 2 contributions to our documentation and book. 
(#9518, #9596) + + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.5.4.md b/CHANGELOG/v1.5.4.md new file mode 100644 index 000000000000..9ba5a516a2ef --- /dev/null +++ b/CHANGELOG/v1.5.4.md @@ -0,0 +1,37 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.24.x -> v1.28.x +- Workload Cluster: v1.22.x -> v1.28.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.5.3 +## :chart_with_upwards_trend: Overview +- 18 new commits merged +- 6 bugs fixed 🐛 + +## :bug: Bug Fixes +- CAPD: Fix DockerMachine panic (#9689) +- CI: Fix reporting bug in verify-container-image script (#9677) +- clusterctl: Fix provider namespace secret not included in clusterctl move (#9746) +- ClusterResourceSet: Requeue after 1 minute if ErrClusterLocked got hit (#9787) +- KCP: Allow dropping patches KubeadmControlPlane KubeadmConfig (#9699) +- Runtime SDK: Set User Agent for test extension correctly (#9750) + +## :seedling: Others +- CAPD: Set Condition, if creating external LB failed. (#9711) +- Dependency: Bump cert-manager to v1.13.2 (#9658) +- Dependency: Bump controller runtime to v0.15.3 (#9624) +- Dependency: Bump github.com/docker/docker from 24.0.5 to 24.0.7 (#9675) +- Dependency: Bump Go version to v1.20.11 (#9686) +- Dependency: Bump golang.org/grpc to v1.59.0 (#9627) +- Dependency: Bump opentelemetry to fix CVEs (#9710) +- e2e: Improve output of exec.KubectlApply (#9765) +- KCP: Support admin config for Kubeadm v1.29 (#9684) +- Release: Remove auto-labelling for clusterctl (#9659) +- Release: Revert "Improve release staging build speed" (#9764) + +:book: Additionally, there has been 1 contribution to our documentation and book. (#9722) + + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.5.5.md b/CHANGELOG/v1.5.5.md new file mode 100644 index 000000000000..c9e1ccb6cff1 --- /dev/null +++ b/CHANGELOG/v1.5.5.md @@ -0,0 +1,31 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.24.x -> v1.28.x +- Workload Cluster: v1.22.x -> v1.28.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.5.4 +## :chart_with_upwards_trend: Overview +- 9 new commits merged +- 1 feature addition ✨ +- 4 bugs fixed 🐛 + +## :sparkles: New Features +- Control-plane: KCP: Allow mutation of all fields that should be mutable (#9884) + +## :bug: Bug Fixes
- clusterctl: Validate no objects exist from CRDs before deleting them (#9835) +- e2e: Test: wait for topology to get rolled out before continuing with scaling checks (#9828) +- MachinePool: Fix TestReconcileMachinePoolScaleToFromZero flakes (#9823) +- Testing: SSA: fix flaky test TestPatch/Test patch with Machine (#9916) + +## :seedling: Others +- Dependency: Bump golang.org/x/crypto to v0.17.0 (#9944) +- Dependency: Bump to Go 1.20.12 (#9842) +- e2e: Rename scale test to drop [Scale] tag (#9977) + +:book: Additionally, there has been 1 contribution to our documentation and book.
(#9877) + + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.5.6.md b/CHANGELOG/v1.5.6.md new file mode 100644 index 000000000000..86ca91ad340f --- /dev/null +++ b/CHANGELOG/v1.5.6.md @@ -0,0 +1,24 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.24.x -> v1.28.x +- Workload Cluster: v1.22.x -> v1.28.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.5.5 +## :chart_with_upwards_trend: Overview +- 6 new commits merged +- 1 bug fixed 🐛 + +## :bug: Bug Fixes +- ClusterCacheTracker: Fix ClusterCacheTracker memory leak (#10065) + +## :seedling: Others +- clusterctl: Bump cert-manager to v1.14.2 (#10121) (#10128) +- Community meeting: Promote chrischdi to Cluster API maintainer (#10090) +- Dependency: Bump Go to 1.21.5 (#10153) + +:book: Additionally, there has been 1 contribution to our documentation and book. (#10117) + + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.5.7.md b/CHANGELOG/v1.5.7.md new file mode 100644 index 000000000000..48be9cf9e0b8 --- /dev/null +++ b/CHANGELOG/v1.5.7.md @@ -0,0 +1,36 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.24.x -> v1.28.x +- Workload Cluster: v1.22.x -> v1.28.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.5.6 +## :chart_with_upwards_trend: Overview +- 8 new commits merged +- 1 bug fixed 🐛 + +## :bug: Bug Fixes +- e2e: Test: retry GetOwnerGraph when having certificate issues (#10218) + +## :seedling: Others +- CABPK: Add pod metadata to capbk manager (#10213) +- Dependency: Bump go version to 1.21.8 (#10247) +- Dependency: Bump protobuf to v1.33.0 to address CVEs (#10250) +- Release: Improve release-staging build (#10227) +- Release: Read in dockerfiles from stdin (#10210) + +:book: Additionally, there have been 2 contributions to our documentation and book. (#10234, #10259) + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +- google.golang.org/protobuf: v1.31.0 → v1.33.0 + +### Removed +_Nothing has changed._ + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.5.8.md b/CHANGELOG/v1.5.8.md new file mode 100644 index 000000000000..723c28a4d217 --- /dev/null +++ b/CHANGELOG/v1.5.8.md @@ -0,0 +1,42 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.24.x -> v1.28.x +- Workload Cluster: v1.22.x -> v1.28.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.5.7 +## :chart_with_upwards_trend: Overview +- 11 new commits merged +- 5 bugs fixed 🐛 + +## :bug: Bug Fixes +- CI: Fix for TestServerSideApplyWithDefaulting (#10344) +- ClusterClass: Improve handling of topology orphaned objects (#10347) +- e2e: fix kubetest to allow parallel execution on different clusters (#10432) +- KCP: Skip checking `clusterConfiguration.dns` fields when KCP checking MachineNeedRollout (#10304) +- MachineSet: deletion priority to avoid deleting too many machines (#10429) + +## :seedling: Others +- CI: Added go directive test (#10349) +- clusterctl: Bump cert-manager to 1.14.4 (#10273) +- Dependency: Bump docker to address CVE (#10316) +- Dependency: Bump golang to v1.21.9 and golang.org/x/net to mitigate CVE-2023-45288 (#10377) + +:book: Additionally, there have been 2 contributions to our documentation and book. 
(#10296, #10339) + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +- golang.org/x/crypto: v0.17.0 → v0.21.0 +- golang.org/x/net: v0.17.0 → v0.23.0 +- golang.org/x/sys: v0.15.0 → v0.18.0 +- golang.org/x/term: v0.15.0 → v0.18.0 + +### Removed +_Nothing has changed._ + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.6.0-beta.0.md b/CHANGELOG/v1.6.0-beta.0.md new file mode 100644 index 000000000000..78db537f39a4 --- /dev/null +++ b/CHANGELOG/v1.6.0-beta.0.md @@ -0,0 +1,257 @@ +🚨 This is a BETA release. Use it only for testing purposes. If you find any bugs, file an [issue](https://github.com/kubernetes-sigs/cluster-api/issues/new). + +## Highlights + +* MachinePools are now supported in ClusterClass Clusters +* Metrics, profiling and other diagnostics are now served securely by default +* Types in `ipam.cluster.x-k8s.io` have graduated to `v1beta1` + +## Deprecation Warning + +- The API version `v1alpha4` is no longer served in this release. + - [Version migration guide](https://main.cluster-api.sigs.k8s.io/developer/providers/version-migration.html) + - [GitHub issue #8038](https://github.com/kubernetes-sigs/cluster-api/issues/8038) + - [API deprecation details](https://main.cluster-api.sigs.k8s.io/contributing#removal-of-v1alpha3--v1alpha4-apiversions) +- The API version `v1alpha3` has been completely removed in this release. +- Flag `--metrics-bind-addr` is [deprecated](https://github.com/kubernetes-sigs/cluster-api/pull/9264) for all controllers + +
+More details about the release + +:warning: **BETA RELEASE NOTES** :warning: + +## 👌 Kubernetes version support + +- Management Cluster: v1.25.x -> v1.28.x +- Workload Cluster: v1.23.x -> v1.28.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.5.0 +## :chart_with_upwards_trend: Overview +- 310 new commits merged +- 6 breaking changes :warning: +- 13 feature additions ✨ +- 28 bugs fixed 🐛 + +## :warning: Breaking Changes +- API: Remove v1alpha3 API Version (#8997) +- API: Stop serving v1alpha4 API Versions (#8996) +- clusterctl: Improve Context handling in clusterctl (#8939) +- Dependency: Bump to controller-runtime v0.16 (#8999) +- Metrics/Logging: Implement secure diagnostics (metrics, pprof, log level changes) (#9264) +- util: Remove go-vcs dependency from releaselink tool (#9288) + +## :sparkles: New Features +- API: Add validation to nested ObjectMeta fields (#8431) +- CAPD: Add config maps to CAPD RBAC (#9528) +- CAPD: Allow adding custom HA proxy config for CAPD load balancer (#8785) +- CAPD: Initialize configmap object before getting it (#9529) +- ClusterClass: Add topology-owned label to MachineHealthChecks. (#9191) +- ClusterClass: Introduce NamingStrategy and allow generating names using go templates (#9340) +- ClusterClass: Update API with ClusterClass MachinePool support (#8820) +- clusterctl: Block move with annotation (#8690) +- IPAM: Promote IPAM types to v1beta1 (#9525) +- MachinePool: Add MachinePool workers support in ClusterClass (#9016) +- MachineSet: Adjust preflight check to allow kubelet version skew of 3 for clusters running v1.28 and above (#9222) +- Release: Add automation to create release branch and tags (#9111) +- Testing/documentation: V1.28: Prepare quickstart, capd and tests for the new release including kind bump (#9160) + +## :bug: Bug Fixes +- CAPBK: Certificate paths in cloud-init scripts should not use a platform-dependent path separator (#9167) +- CAPD: Delete container after failed start to work around port allocation issues (#9125) +- ClusterClass: Fix ClusterClass enqueue for ExtensionConfig (#9133) +- ClusterClass: Topology: fix namingstrategy webhook to not use uppercase characters for testing the template and align unit test to e2e test (#9425) +- Dependency: Bump golang.org/x/net to v0.13.0 (#9121) +- Dependency: Bump to docker v24.0.5-0.20230714235725-36e9e796c6fc (#9038) +- Devtools: Adding metrics container port in tilt-prepare only if it's missing (#9308) +- Devtools: Allow duplicate objects in Tiltfile (#9302) +- Devtools: Change tilt debug base image to golang (#9070) +- Devtools: Fix tilt-prepare leader-elect setting (#9315) +- Devtools: Pin Plantuml version (#9424) +- Devtools: Tilt: ensure .tiltbuild/bin directory is created early enough, add tilt troubleshooting guide (#9165) +- e2e: Drop MachinePools from Dualstack tests (#9477) +- e2e: Fix autoscaler image repo (#9353) +- e2e: Test: pin conformance image to a version which includes a fix for the dualstack tests (#9252) +- KCP: Allow to drop useExperimentalRetryJoin field from KubeadmControlPlane.kubeadmConfigSpec (#9170) +- KCP: Fix KCP Controller reconcile always return error when workload cluster is unreachable (#9342) +- KCP: Requeue KCP object if ControlPlaneComponentsHealthyCondition is not yet true (#9032) +- Machine: Retry Node delete when CCT is locked (#9570) +- MachineDeployment: MD controller: use regular random suffix for MachineSets, ensure max length 63 (#9298) +- 
MachineHealthCheck: Fix excessive trace logging in the machine health check controller (#9419) +- Release: Alphabetically sorting release tool output (#9055) +- Release: Deduplicating area in pr title in release notes (#9186) +- Release: Fix go install path for kpromo v4.0.4 (#9336) +- Release: Generate warning when release notes can not be generated (#9163) +- Release: Hack/release-notes: ensure release notes tool can be used for external projects again (#9018) +- Release: Reverts pull request from cahillsf/improve-release-speed (#9465) +- util: Fix AddAnnotations for unstructured.Unstructured (#9164) + +## :seedling: Others +- API: Add ClusterClass column to Cluster CRD (#9120) +- API: Add verify-import-restrictions to enforce import restrictions (#9407) +- API: Enforce import restrictions in all API packages (#9461) +- API: Move API v1beta1 webhooks to a separate package (#9047) +- API: Move docker infrastructure experimental API v1beta1 webhooks to sepa… (#9460) +- API: Move docker infrastructure API v1beta1 webhooks to separate package (#9458) +- API: Move experimental addons API v1beta1 webhooks to separate package (#9438) +- API: Move experimental API v1beta1 webhooks to separate package (#9417) +- API: Move inmemory infrastructure API v1beta1 webhooks to separate package (#9459) +- API: Move Kubeadm API v1beta1 webhooks to separate package (#9410) +- API: Remove files and markers for Kubebuilder (#9344) +- API: Remove reliance on controller-runtime scheme builder (#9045) +- API: Remove reliance on controller-runtime scheme builder for experimental APIs (#9185) +- API: Remove reliance on controller-runtime scheme builder for remaining API groups (#9266) +- API: Remove the dependency on cluster-api/utils from addons API (#9482) +- API: Test and document controller ownerReferences (#9153) +- CAPBK: Remove Kubeadm upstream v1beta1 types (#9345) +- CAPD: Fix multi error handling in RunContainer (#9139) +- CI: Add colored-line-number output for golangci-lint action (#9147) +- CI: Add dependabot for test and hack/tools module (#9041) +- CI: Add exclude for Kustomize API to dependabot config (#9059) +- CI: Add licence-scan for pull requests (#9184) +- CI: Add loggercheck linter and fix findings (#9446) +- CI: Add verify-govulncheck and verify-vulnerabilities targets and integrate to scan action (#9144) +- CI: Bump actions/cache from 3.3.1 to 3.3.2 (#9395) +- CI: Bump actions/checkout from 4.1.0 to 4.1.1 (#9611) +- CI: Bump actions/setup-go from 4.0.1 to 4.1.0 (#9187) +- CI: Bump apidiff to v0.7.0 (#9472) +- CI: Bump golangci-lint to v1.54.1 (#9174) +- CI: Bump golangci/golangci-lint-action from 3.6.0 to 3.7.0 (#9261) +- CI: Bump tj-actions/changed-files from 39.2.2 to 39.2.4 (#9610) +- CI: Bump Trivy to v0.45.1 (#9445) +- CI: Fix .golangci.yml comments (#9499) +- CI: Ginkgolinter: forbid focus container (#9320) +- CI: Github: add edited and reopened as triggers for the GH workflow approval (#9259) +- CI: Github: add workflow to auto-approve golangci-lint if ok-to-test label is set (#9244) +- CI: Go.mod reformat to have only two require blocks (#9192) +- CI: Golangci-lint: replace deprecated local-prefixes setting for gci (#9339) +- CI: Make GO_ARCH explicit in verify_container script (#9341) +- CI: Update actions for 1.5 and make names consistent (#9115) +- CI: Verify plantuml image generation in CI (#9363) +- ClusterCacheTracker: Add separate concurrency flag for cluster cache tracker (#9116) +- ClusterCacheTracker: Ensure Get/List calls are not getting stuck when apiserver is unreachable
(#9028) +- ClusterCacheTracker: Fix accessor deletion on health check failure (#9025) +- ClusterClass: Add ownerRefs to BootstrapConfig/InfraMachinePool in classy Clusters (#9389) +- ClusterClass: Embed ssa.FilterObjectInput into HelperOption to remove duplication (#9512) +- ClusterClass: Fix some nits in Cluster topology engine tests (#9464) +- ClusterClass: Improve Cluster variable defaulting/validation errors (#9452) +- ClusterClass: Improve message for TopologyReconciledCondition (#9400) +- ClusterClass: Make ClusterClass generated object names consistent (#9254) +- ClusterClass: Minor fixes for CC+MP implementation (#9318) +- clusterctl: Check resource blocking clusterctl move during discovery (#9246) +- clusterctl: Use goproxy to check version in clusterctl (#9237) +- clusterctl: Use http get to download files from GitHub in clusterctl (#9236) +- Dependency: Bump cert-manager to v1.13.1 (#9505) +- Dependency: Bump cloud.google.com/go/storage from 1.32.0 to 1.33.0 in /hack/tools (#9423) +- Dependency: Bump controller tools to v0.13.0 (#9221) +- Dependency: Bump controller-runtime to v0.16.3 (#9592) +- Dependency: Bump conversion-gen to v0.28 (#9267) +- Dependency: Bump corefile-migration library to v1.0.21 (#9307) +- Dependency: Bump docker to v24.0.5 (#9064) +- Dependency: Bump envtest binaries to 1.28 (#9268) +- Dependency: Bump github.com/blang/semver to v4 (#9189) +- Dependency: Bump github.com/docker/distribution (#9544) +- Dependency: Bump github.com/docker/docker from 24.0.5+incompatible to 24.0.6+incompatible in /test (#9377) +- Dependency: Bump github.com/emicklei/go-restful/v3 from 3.10.2 to 3.11.0 in /test (#9272) +- Dependency: Bump github.com/evanphx/json-patch/v5 from 5.6.0 to 5.7.0 (#9397) +- Dependency: Bump github.com/google/go-cmp from 0.5.9 to 0.6.0 (#9562) +- Dependency: Bump github.com/onsi/ginkgo/v2 from 2.12.1 to 2.13.0 (#9533) +- Dependency: Bump github.com/onsi/gomega from 1.28.0 to 1.28.1 (#9608) +- Dependency: Bump github.com/prometheus/client_golang from 1.16.0 to 1.17.0 (#9517) +- Dependency: Bump github.com/spf13/viper from 1.16.0 to 1.17.0 (#9561) +- Dependency: Bump Go to v1.20.10 (#9551) +- Dependency: Bump go-github dependency to version v53 (#8995) +- Dependency: Bump go.opentelemetry.io/* dependencies (#9598) +- Dependency: Bump golang.org/x/net from 0.15.0 to 0.17.0 in /test (#9537) +- Dependency: Bump golang.org/x/oauth2 from 0.12.0 to 0.13.0 (#9534) +- Dependency: Bump golang.org/x/text from 0.12.0 to 0.13.0 (#9370) +- Dependency: Bump gomodules.xyz/jsonpatch/v2 from 2.3.0 to 2.4.0 (#9188) +- Dependency: Bump google.golang.org/api from 0.146.0 to 0.148.0 in /hack/tools (#9581) +- Dependency: Bump google.golang.org/grpc from 1.58.2 to 1.58.3 (#9607) +- Dependency: Bump kpromo to v4.0.4 (#9241) +- Dependency: Bump some dependencies in Makefile (#9549) +- Dependency: Bump the kubernetes group in /hack/tools with 2 updates (#9420) +- Dependency: Bump the kubernetes group in /hack/tools with 2 updates (#9574) +- Dependency: Bump the kubernetes group with 2 updates (#9606) +- Dependency: Bump the kubernetes group with 4 updates (#9326) +- Dependency: Bump the kubernetes group with 4 updates (#9455) +- Dependency: Bump to Go 1.20.8 (#9381) +- Dependency: Bump trivy to v0.46.0 (#9558) +- Dependency: Replace hashicorp/go-multierror with kerrors (#9175) +- Dependency: Update ensure-kubectl.sh to 1.28 (#9275) +- Devtools: Add .PHONY for doctoc target (#9148) +- Devtools: Bump CAPI visualizer to v1.2.0 (#9195) +- Devtools: Drop duplicate pprof and unused
linter excludes (#9156) +- Devtools: Improve Performance dashboard (#9387) +- Devtools: Make dev cluster networking configurable (#9183) +- Devtools: Makefile: run doctoc only once (#9182) +- Devtools: Move tilt-prepare and log-push to tools/internal (#9020) +- Devtools: Observability: move metrics to config and use sidecar in kube-state-metrics (#9390) +- Devtools: Refactor docker-push* Makefile targets so users can control with ALL_DOCKER_BUILD which images are pushed (#8586) +- Devtools: Tiltfile: rename deploy_kustomizations to additional_kustomizations (#9439) +- e2e: Add back flavor to Waiter interface (#9166) +- e2e: Add CRS re-reconcile to ownerReference test (#9296) +- e2e: Add log level for kube components patch to ClusterClass (#9493) +- e2e: Add MachinePools to Topology Quickstart E2E Templates (#9393) +- e2e: Add test for ownerReference apiVersion update (#9269) +- e2e: Add test for scale testing machinery (#9510) +- e2e: Bump autoscaler to v1.28.0 (#9349) +- e2e: Drop PR-Informing test tag and job description (#9362) +- e2e: Dump all pods in e2e test clusters (#9441) +- e2e: Dump all resource information for self-hosted tests (#9547) +- e2e: Ensure finalizers are resilient on reconciliation (#9471) +- e2e: Fail tests if test env version check fails (#9388) +- e2e: Fix broken e2e test clusterclass (#9506) +- e2e: Improve labels/annotations in CAPD test ClusterClass (#9469) +- e2e: Refactor e2e ownerRef test utils (#9313) +- e2e: Test/e2e: structure resources by namespace/kind again (#9462) +- e2e: Use existing value of `SKIP_RESOURCE_CLEANUP` if set in environment (#9152) +- IPAM: Add age column to kubectl output (#9521) +- KCP: Controlplane: add a test case for syncMachines where the InfraMachine does not exist. (#8992) +- KCP: Remove disableInPlacePropagation field in KCP controller (#9099) +- KCP: Remove redundant GetRESTConfig in KCP Management.GetWorkloadCluster (#9448) +- Logging: Change default log level to 2 (#9093) +- Logging: Fix patch errors not being logged (#9224) +- Logging: Set controller-runtime logger for clusterctl (#9107) +- MachinePool: Add MachinePool Builders (#9346) +- MachinePool: Add util function to get MachinePool by label (#9219) +- Metrics: Hack/observability: add capi_machine_status_certificatesexpirydate metric (#9084) +- Metrics: Hack: bump kube-state-metrics and prometheus charts (#9352) +- Release: Add additional blocks to release note generation (#9247) +- Release: Add Release Team OWNERS file to docs/release folder (#9294) +- Release: Capitalized title in release notes (#9086) +- Release: Clarify release team vs k8s/k8s-SIGs org membership (#9089) +- Release: Exclude release trigger PRs from release notes (#9444) +- Release: Format MachineHealthCheck area in release notes (#9500) +- Release: Improve multiple areas PRs with user friendly subs (#9071) +- Release: Improve release notes formatting (#9337) +- Release: Improve release speed (#9392) +- Release: Improve release staging build speed (#9536) +- Release: Prepare main branch for v1.6 development (#9097) +- Release: Remove auto-labelling for clusterctl (#8990) +- Release: Remove extra separator after title in release notes (#9605) +- Release: Update instructions checklist to generate release notes (#9443) +- Release: Update kubebuilder-release-tools to v0.4.0 (#9531) +- Release: Update release-notes make target + corresponding doc (#9573) +- Runtime SDK: Improve ClusterClass watch for ExtensionConfigs (#9338) +- Testing: Add MachinePool test cases (#9474) +- Testing: Add MachinePool test 
cases to engine tests (#9373) +- Testing: Add MachinePool test cases to variables tests (#9372) +- Testing: Add MachinePools to topology upgrade test (#9502) +- Testing: Add test for required properties in clusterclass variables (#9113) +- Testing: Add unit tests for CC MP blueprint, current_state, & desired_state (#9348) +- Testing: Add unit tests for reconcile_state, cluster_controller, & conditions (#9380) +- Testing: Extend test/framework to collect workload cluster nodes (#9416) +- Testing: Replacing gomega.Equal with gomega.BeComparableTo (#9015) +- util: Adjust naming in SortForCreate implementation (#9311) +- util: Move `internal.labels` to `format` package for use by providers (#9002) +- util: Refactor SortForCreate to use sort.Slice (#9251) +- util: Remove previously deprecated code (#9136) + +:book: Additionally, there have been 46 contributions to our documentation and book. (#8260, #8678, #8819, #8988, #9001, #9013, #9014, #9024, #9029, #9080, #9081, #9087, #9112, #9119, #9141, #9146, #9150, #9161, #9173, #9208, #9209, #9213, #9214, #9232, #9270, #9286, #9291, #9305, #9328, #9364, #9386, #9403, #9415, #9429, #9433, #9463, #9487, #9488, #9490, #9511, #9513, #9514, #9527, #9550, #9572, #9593) + +
+
+ +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.6.0-beta.1.md b/CHANGELOG/v1.6.0-beta.1.md new file mode 100644 index 000000000000..c95e5a19372f --- /dev/null +++ b/CHANGELOG/v1.6.0-beta.1.md @@ -0,0 +1,291 @@ +🚨 This is a BETA RELEASE. Use it only for testing purposes. If you find any bugs, file an [issue](https://github.com/kubernetes-sigs/cluster-api/issues/new). + +## Highlights + +* MachinePools are now supported in ClusterClass Clusters +* Metrics, profiling and other diagnostics are now served securely by default +* Types in `ipam.cluster.x-k8s.io` have graduated to `v1beta1` + +## Deprecation Warning + +- The API version `v1alpha4` is no longer served in this release. + - [Version migration guide](https://main.cluster-api.sigs.k8s.io/developer/providers/version-migration.html) + - [GitHub issue #8038](https://github.com/kubernetes-sigs/cluster-api/issues/8038) + - [API deprecation details](https://main.cluster-api.sigs.k8s.io/contributing#removal-of-v1alpha3--v1alpha4-apiversions) +- The API version `v1alpha3` has been completely removed in this release. +- Flag `--metrics-bind-addr` is [deprecated](https://github.com/kubernetes-sigs/cluster-api/pull/9264) for all controllers + +## Changes since v1.6.0-beta.0 +## :chart_with_upwards_trend: Overview +- 14 new commits merged + +## :memo: Proposals +- Community meeting: Add proposal for karpenter integration feature group (#9571) + +## :seedling: Others +- API: Ensure we generate all webhook manifests.yaml (#9621) +- CI: Bump tj-actions/changed-files from 39.2.4 to 40.0.0 (#9641) +- Dependency: Bump github.com/go-logr/logr from 1.2.4 to 1.3.0 (#9644) +- Dependency: Bump github.com/onsi/gomega from 1.28.1 to 1.29.0 (#9643) +- Dependency: Bump go.etcd.io/etcd/api/v3 from 3.5.9 to 3.5.10 (#9642) +- Dependency: Bump go.etcd.io/etcd/client/v3 from 3.5.9 to 3.5.10 (#9646) +- Dependency: Bump golang.org/grpc to v1.59.0 (#9626) +- Dependency: Bump sigs.k8s.io/yaml from 1.3.0 to 1.4.0 (#9645) +- Release: Capitalize area prefix in release notes by default (#9614) + +:book: Additionally, there have been 3 contributions to our documentation and book. (#9565, #9590, #9613) + +
+More details about the release + +:warning: **BETA RELEASE NOTES** :warning: + +## 👌 Kubernetes version support + +- Management Cluster: v1.25.x -> v1.28.x +- Workload Cluster: v1.23.x -> v1.28.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.5.0 +## :chart_with_upwards_trend: Overview +- 326 new commits merged +- 6 breaking changes :warning: +- 13 feature additions ✨ +- 29 bugs fixed 🐛 + +## :memo: Proposals +- Community meeting: Add proposal for karpenter integration feature group (#9571) + +## :warning: Breaking Changes +- API: Remove v1alpha3 API Version (#8997) +- API: Stop serving v1alpha4 API Versions (#8996) +- clusterctl: Improve Context handling in clusterctl (#8939) +- Dependency: Bump to controller-runtime v0.16 (#8999) +- Metrics/Logging: Implement secure diagnostics (metrics, pprof, log level changes) (#9264) +- util: Remove go-vcs dependency from releaselink tool (#9288) + +## :sparkles: New Features +- API: Add validation to nested ObjectMeta fields (#8431) +- CAPD: Add config maps to CAPD RBAC (#9528) +- CAPD: Allow adding custom HA proxy config for CAPD load balancer (#8785) +- CAPD: Initialize configmap object before getting it (#9529) +- ClusterClass: Add topology-owned label to MachineHealthChecks. (#9191) +- ClusterClass: Introduce NamingStrategy and allow generating names using go templates (#9340) +- ClusterClass: Update API with ClusterClass MachinePool support (#8820) +- clusterctl: Block move with annotation (#8690) +- IPAM: Promote IPAM types to v1beta1 (#9525) +- MachinePool: Add MachinePool workers support in ClusterClass (#9016) +- MachineSet: Adjust preflight check to allow kubelet version skew of 3 for clusters running v1.28 and above (#9222) +- Release: Add automation to create release branch and tags (#9111) +- Testing/Documentation: V1.28: Prepare quickstart, capd and tests for the new release including kind bump (#9160) + +## :bug: Bug Fixes +- CAPBK: Certificate paths in cloud-init scripts should not use a platform-dependent path separator (#9167) +- CAPD: Delete container after failed start to work around port allocation issues (#9125) +- ClusterClass: Fix ClusterClass enqueue for ExtensionConfig (#9133) +- ClusterClass: Topology: fix namingstrategy webhook to not use uppercase characters for testing the template and align unit test to e2e test (#9425) +- Dependency: Bump golang.org/x/net to v0.13.0 (#9121) +- Dependency: Bump to docker v24.0.5-0.20230714235725-36e9e796c6fc (#9038) +- Devtools: Adding metrics container port in tilt-prepare only if it's missing (#9308) +- Devtools: Allow duplicate objects in Tiltfile (#9302) +- Devtools: Change tilt debug base image to golang (#9070) +- Devtools: Fix tilt-prepare leader-elect setting (#9315) +- Devtools: Pin Plantuml version (#9424) +- Devtools: Tilt: ensure .tiltbuild/bin directory is created early enough, add tilt troubleshooting guide (#9165) +- Documentation: Fix doctoc detection in verify-doctoc.sh (#9112) +- e2e: Drop MachinePools from Dualstack tests (#9477) +- e2e: Fix autoscaler image repo (#9353) +- e2e: Test: pin conformance image to a version which includes a fix for the dualstack tests (#9252) +- KCP: Allow to drop useExperimentalRetryJoin field from KubeadmControlPlane.kubeadmConfigSpec (#9170) +- KCP: Fix KCP Controller reconcile always return error when workload cluster is unreachable (#9342) +- KCP: Requeue KCP object if ControlPlaneComponentsHealthyCondition is not yet true (#9032) +- Machine: Retry Node delete when CCT is locked (#9570) +- MachineDeployment: MD controller: use regular random suffix for
MachineSets, ensure max length 63 (#9298) +- MachineHealthCheck: Fix excessive trace logging in the machine health check controller (#9419) +- Release: Alphabetically sorting release tool output (#9055) +- Release: Deduplicating area in pr title in release notes (#9186) +- Release: Fix go install path for kpromo v4.0.4 (#9336) +- Release: Generate warning when release notes can not be generated (#9163) +- Release: Hack/release-notes: ensure release notes tool can be used for external projects again (#9018) +- Release: Reverts pull request from cahillsf/improve-release-speed (#9465) +- util: Fix AddAnnotations for unstructured.Unstructured (#9164) + +## :seedling: Others +- API: Add ClusterClass column to Cluster CRD (#9120) +- API: Add verify-import-restrictions to enforce import restrictions (#9407) +- API: Enforce import restrictions in all API packages (#9461) +- API: Ensure we generate all webhook manifests.yaml (#9621) +- API: Move API v1beta1 webhooks to a separate package (#9047) +- API: Move docker infrastructure experimental API v1beta1 webhooks to sepa… (#9460) +- API: Move docker infrastructure API v1beta1 webhooks to separate package (#9458) +- API: Move experimental addons API v1beta1 webhooks to separate package (#9438) +- API: Move experimental API v1beta1 webhooks to separate package (#9417) +- API: Move inmemory infrastructure API v1beta1 webhooks to separate package (#9459) +- API: Move Kubeadm API v1beta1 webhooks to separate package (#9410) +- API: Remove files and markers for Kubebuilder (#9344) +- API: Remove reliance on controller-runtime scheme builder (#9045) +- API: Remove reliance on controller-runtime scheme builder for experimental APIs (#9185) +- API: Remove reliance on controller-runtime scheme builder for remaining API groups (#9266) +- API: Remove the dependency on cluster-api/utils from addons API (#9482) +- API: Test and document controller ownerReferences (#9153) +- CAPBK: Remove Kubeadm upstream v1beta1 types (#9345) +- CAPD: Fix multi error handling in RunContainer (#9139) +- CI: Add colored-line-number output for golangci-lint action (#9147) +- CI: Add dependabot for test and hack/tools module (#9041) +- CI: Add exclude for Kustomize API to dependabot config (#9059) +- CI: Add licence-scan for pull requests (#9184) +- CI: Add loggercheck linter and fix findings (#9446) +- CI: Add verify-govulncheck and verify-vulnerabilities targets and integrate to scan action (#9144) +- CI: Bump actions/cache from 3.3.1 to 3.3.2 (#9395) +- CI: Bump actions/checkout from 4.1.0 to 4.1.1 (#9611) +- CI: Bump actions/setup-go from 4.0.1 to 4.1.0 (#9187) +- CI: Bump apidiff to v0.7.0 (#9472) +- CI: Bump golangci-lint to v1.54.1 (#9174) +- CI: Bump golangci/golangci-lint-action from 3.6.0 to 3.7.0 (#9261) +- CI: Bump tj-actions/changed-files from 39.2.4 to 40.0.0 (#9641) +- CI: Bump Trivy to v0.45.1 (#9445) +- CI: Fix .golangci.yml comments (#9499) +- CI: Ginkgolinter: forbid focus container (#9320) +- CI: Github: add edited and reopened as triggers for the GH workflow approval (#9259) +- CI: Github: add workflow to auto-approve golangci-lint if ok-to-test label is set (#9244) +- CI: Go.mod reformat to have only two require blocks (#9192) +- CI: Golangci-lint: replace deprecated local-prefixes setting for gci (#9339) +- CI: Make GO_ARCH explicit in verify_container script (#9341) +- CI: Update actions for 1.5 and make names consistent (#9115) +- CI: Verify plantuml image generation in CI (#9363) +- ClusterCacheTracker: Add separate concurrency flag for cluster cache
tracker (#9116) +- ClusterCacheTracker: Ensure Get/List calls are not getting stuck when apiserver is unreachable (#9028) +- ClusterCacheTracker: Fix accessor deletion on health check failure (#9025) +- ClusterClass: Add ownerRefs to BootstrapConfig/InfraMachinePool in classy Clusters (#9389) +- ClusterClass: Embed ssa.FilterObjectInput into HelperOption to remove duplication (#9512) +- ClusterClass: Fix some nits in Cluster topology engine tests (#9464) +- ClusterClass: Improve Cluster variable defaulting/validation errors (#9452) +- ClusterClass: Improve message for TopologyReconciledCondition (#9400) +- ClusterClass: Make ClusterClass generated object names consistent (#9254) +- ClusterClass: Minor fixes for CC+MP implementation (#9318) +- clusterctl: Check resource blocking clusterctl move during discovery (#9246) +- clusterctl: Use goproxy to check version in clusterctl (#9237) +- clusterctl: Use http get to download files from GitHub in clusterctl (#9236) +- Dependency: Bump cert-manager to v1.13.1 (#9505) +- Dependency: Bump cloud.google.com/go/storage from 1.32.0 to 1.33.0 in /hack/tools (#9423) +- Dependency: Bump controller tools to v0.13.0 (#9221) +- Dependency: Bump controller-runtime to v0.16.3 (#9592) +- Dependency: Bump conversion-gen to v0.28 (#9267) +- Dependency: Bump corefile-migration library to v1.0.21 (#9307) +- Dependency: Bump docker to v24.0.5 (#9064) +- Dependency: Bump envtest binaries to 1.28 (#9268) +- Dependency: Bump github.com/blang/semver to v4 (#9189) +- Dependency: Bump github.com/docker/distribution (#9544) +- Dependency: Bump github.com/docker/docker from 24.0.5+incompatible to 24.0.6+incompatible in /test (#9377) +- Dependency: Bump github.com/emicklei/go-restful/v3 from 3.10.2 to 3.11.0 in /test (#9272) +- Dependency: Bump github.com/evanphx/json-patch/v5 from 5.6.0 to 5.7.0 (#9397) +- Dependency: Bump github.com/go-logr/logr from 1.2.4 to 1.3.0 (#9644) +- Dependency: Bump github.com/google/go-cmp from 0.5.9 to 0.6.0 (#9562) +- Dependency: Bump github.com/onsi/ginkgo/v2 from 2.12.1 to 2.13.0 (#9533) +- Dependency: Bump github.com/onsi/gomega from 1.28.1 to 1.29.0 (#9643) +- Dependency: Bump github.com/prometheus/client_golang from 1.16.0 to 1.17.0 (#9517) +- Dependency: Bump github.com/spf13/viper from 1.16.0 to 1.17.0 (#9561) +- Dependency: Bump Go to v1.20.10 (#9551) +- Dependency: Bump go-github dependency to version v53 (#8995) +- Dependency: Bump go.etcd.io/etcd/api/v3 from 3.5.9 to 3.5.10 (#9642) +- Dependency: Bump go.etcd.io/etcd/client/v3 from 3.5.9 to 3.5.10 (#9646) +- Dependency: Bump go.opentelemetry.io/* dependencies (#9598) +- Dependency: Bump golang.org/grpc to v1.59.0 (#9626) +- Dependency: Bump golang.org/x/net from 0.15.0 to 0.17.0 in /test (#9537) +- Dependency: Bump golang.org/x/oauth2 from 0.12.0 to 0.13.0 (#9534) +- Dependency: Bump golang.org/x/text from 0.12.0 to 0.13.0 (#9370) +- Dependency: Bump gomodules.xyz/jsonpatch/v2 from 2.3.0 to 2.4.0 (#9188) +- Dependency: Bump google.golang.org/api from 0.146.0 to 0.148.0 in /hack/tools (#9581) +- Dependency: Bump google.golang.org/grpc from 1.58.2 to 1.58.3 (#9607) +- Dependency: Bump kpromo to v4.0.4 (#9241) +- Dependency: Bump sigs.k8s.io/yaml from 1.3.0 to 1.4.0 (#9645) +- Dependency: Bump some dependencies in Makefile (#9549) +- Dependency: Bump the kubernetes group in /hack/tools with 2 updates (#9420) +- Dependency: Bump the kubernetes group in /hack/tools with 2 updates (#9574) +- Dependency: Bump the kubernetes group with 2 updates (#9606) +- Dependency: Bump the kubernetes
group with 4 updates (#9326) +- Dependency: Bump the kubernetes group with 4 updates (#9455) +- Dependency: Bump trivy to v0.46.0 (#9558) +- Dependency: Replace hashicorp/go-multierror with kerrors (#9175) +- Dependency: Update ensure-kubectl.sh to 1.28 (#9275) +- Devtools: Add .PHONY for doctoc target (#9148) +- Devtools: Bump CAPI visualizer to v1.2.0 (#9195) +- Devtools: Drop duplicate pprof and unused linter excludes (#9156) +- Devtools: Improve Performance dashboard (#9387) +- Devtools: Make dev cluster networking configurable (#9183) +- Devtools: Makefile: run doctoc only once (#9182) +- Devtools: Move tilt-prepare and log-push to tools/internal (#9020) +- Devtools: Observability: move metrics to config and use sidecar in kube-state-metrics (#9390) +- Devtools: Refactor docker-push* Makefile targets so users can control with ALL_DOCKER_BUILD which images are pushed (#8586) +- Devtools: Tiltfile: rename deploy_kustomizations to additional_kustomizations (#9439) +- Documentation: Add more links to release-tasks doc (#9029) +- Documentation: Extend docs for patch.NewHelper (#9001) +- Documentation: Fixed grammatically incorrect plurals in release tools (#9024) +- Documentation: Promote chrischdi to cluster-api reviewer (#9286) +- Documentation: Use official plantuml image for diagrams (#9328) +- e2e: Add back flavor to Waiter interface (#9166) +- e2e: Add CRS re-reconcile to ownerReference test (#9296) +- e2e: Add log level for kube components patch to ClusterClass (#9493) +- e2e: Add MachinePools to Topology Quickstart E2E Templates (#9393) +- e2e: Add test for ownerReference apiVersion update (#9269) +- e2e: Add test for scale testing machinery (#9510) +- e2e: Bump autoscaler to v1.28.0 (#9349) +- e2e: Drop PR-Informing test tag and job description (#9362) +- e2e: Dump all pods in e2e test clusters (#9441) +- e2e: Dump all resource information for self-hosted tests (#9547) +- e2e: Ensure finalizers are resilient on reconciliation (#9471) +- e2e: Fail tests if test env version check fails (#9388) +- e2e: Fix broken e2e test clusterclass (#9506) +- e2e: Improve labels/annotations in CAPD test ClusterClass (#9469) +- e2e: Refactor e2e ownerRef test utils (#9313) +- e2e: Test/e2e: structure resources by namespace/kind again (#9462) +- e2e: Use existing value of `SKIP_RESOURCE_CLEANUP` if set in environment (#9152) +- IPAM: Add age column to kubectl output (#9521) +- KCP: Controlplane: add a test case for syncMachines where the InfraMachine does not exist. 
(#8992) +- KCP: Remove disableInPlacePropagation field in KCP controller (#9099) +- KCP: Remove redundant GetRESTConfig in KCP Management.GetWorkloadCluster (#9448) +- Logging: Change default log level to 2 (#9093) +- Logging: Fix patch errors not being logged (#9224) +- Logging: Set controller-runtime logger for clusterctl (#9107) +- MachinePool: Add MachinePool Builders (#9346) +- MachinePool: Add util function to get MachinePool by label (#9219) +- Metrics: Hack/observability: add capi_machine_status_certificatesexpirydate metric (#9084) +- Metrics: Hack: bump kube-state-metrics and prometheus charts (#9352) +- Release: Add additional blocks to release note generation (#9247) +- Release: Add Release Team OWNERS file to docs/release folder (#9294) +- Release: Capitalize area prefix in release notes by default (#9614) +- Release: Capitalized title in release notes (#9086) +- Release: Clarify release team vs k8s/k8s-SIGs org membership (#9089) +- Release: Exclude release trigger PRs from release notes (#9444) +- Release: Format MachineHealthCheck area in release notes (#9500) +- Release: Improve multiple areas PRs with user friendly subs (#9071) +- Release: Improve release notes formatting (#9337) +- Release: Improve release speed (#9392) +- Release: Improve release staging build speed (#9536) +- Release: Prepare main branch for v1.6 development (#9097) +- Release: Remove auto-labelling for clusterctl (#8990) +- Release: Remove extra separator after title in release notes (#9605) +- Release: Update instructions checklist to generate release notes (#9443) +- Release: Update kubebuilder-release-tools to v0.4.0 (#9531) +- Release: Update release-notes make target + corresponding doc (#9573) +- Runtime SDK: Improve ClusterClass watch for ExtensionConfigs (#9338) +- Testing: Add MachinePool test cases (#9474) +- Testing: Add MachinePool test cases to engine tests (#9373) +- Testing: Add MachinePool test cases to variables tests (#9372) +- Testing: Add MachinePools to topology upgrade test (#9502) +- Testing: Add test for required properties in clusterclass variables (#9113) +- Testing: Add unit tests for CC MP blueprint, current_state, & desired_state (#9348) +- Testing: Add unit tests for reconcile_state, cluster_controller, & conditions (#9380) +- Testing: Extend test/framework to collect workload cluster nodes (#9416) +- Testing: Replacing gomega.Equal with gomega.BeComparableTo (#9015) +- util: Adjust naming in SortForCreate implementation (#9311) +- util: Move `internal.labels` to `format` package for use by providers (#9002) +- util: Refactor SortForCreate to use sort.Slice (#9251) +- util: Remove previously deprecated code (#9136) + +:book: Additionally, there have been 43 contributions to our documentation and book. (#8260, #8678, #8819, #8988, #9013, #9014, #9080, #9081, #9087, #9119, #9141, #9146, #9150, #9161, #9173, #9208, #9209, #9213, #9214, #9232, #9270, #9291, #9305, #9364, #9386, #9403, #9415, #9429, #9433, #9463, #9487, #9488, #9490, #9511, #9513, #9514, #9527, #9550, #9565, #9572, #9590, #9593, #9613) +
+
+ +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.6.0-rc.0.md b/CHANGELOG/v1.6.0-rc.0.md new file mode 100644 index 000000000000..851585a50a0d --- /dev/null +++ b/CHANGELOG/v1.6.0-rc.0.md @@ -0,0 +1,370 @@ +🚨 This is a RELEASE CANDIDATE. Use it only for testing purposes. If you find any bugs, file an [issue](https://github.com/kubernetes-sigs/cluster-api/issues/new). + +## Highlights + +* MachinePools are now supported in ClusterClass Clusters +* Metrics, profiling and other diagnostics are now served securely by default +* Types in `ipam.cluster.x-k8s.io` have graduated to `v1beta1` + +## Deprecation Warning + +- The API version `v1alpha4` is no longer served in this release. + - [Version migration guide](https://main.cluster-api.sigs.k8s.io/developer/providers/version-migration.html) + - [GitHub issue #8038](https://github.com/kubernetes-sigs/cluster-api/issues/8038) + - [API deprecation details](https://main.cluster-api.sigs.k8s.io/contributing#removal-of-v1alpha3--v1alpha4-apiversions) +- The API version `v1alpha3` has been completely removed in this release. +- Flag `--metrics-bind-addr` is [deprecated](https://github.com/kubernetes-sigs/cluster-api/pull/9264) for all controllers + +## Changes since v1.6.0-beta.1 +## :chart_with_upwards_trend: Overview +- 53 new commits merged +- 2 feature additions ✨ +- 5 bugs fixed 🐛 + +## :memo: Proposals +- Documentation: CAEP: Flexible Managed Kubernetes Endpoints (#8500) + +## :sparkles: New Features +- CAPD: Add MachinePool Machine implementation to CAPD components (#8842) +- clusterctl: Add RKE2 bootstrap provider to clusterctl (#9720) + +## :bug: Bug Fixes +- CAPD: Fix DockerMachine panic (#9673) +- CI: Fix reporting bug in verify-container-image script (#9676) +- clusterctl: Fix provider namespace secret not included in clusterctl move (#9694) +- KCP: Allow dropping patches KubeadmControlPlane KubeadmConfig (#9698) +- MachinePool: Fix bug where MachinePool Machine ownerRefs weren't updating (#9619) + +## :seedling: Others +- CAPD: Set Condition, if creating external LB failed.
(#9697) +- CI: Bump actions/github-script from 7.0.0 to 7.0.1 (#9738) +- CI: Bump tj-actions/changed-files from 40.1.0 to 40.1.1 (#9714) +- ClusterClass: Add additional test cases for nested variable defaulting (#9728) +- clusterctl: Propagate ctx to retryWithExponentialBackoff in clusterctl (#9437) +- Dependency: Bump cert-manager to v1.13.2 (#9653) +- Dependency: Bump cloud.google.com/go/storage from 1.34.1 to 1.35.1 in /hack/tools (#9727) +- Dependency: Bump github.com/docker/docker from 24.0.6+incompatible to 24.0.7+incompatible in /test (#9652) +- Dependency: Bump github.com/fatih/color from 1.15.0 to 1.16.0 (#9681) +- Dependency: Bump github.com/onsi/ginkgo/v2 from 2.13.0 to 2.13.1 (#9716) +- Dependency: Bump github.com/onsi/gomega from 1.29.0 to 1.30.0 (#9717) +- Dependency: Bump github.com/spf13/cobra from 1.7.0 to 1.8.0 (#9679) +- Dependency: Bump Go version to v1.20.11 (#9683) +- Dependency: Bump golang.org/x/oauth2 from 0.13.0 to 0.14.0 (#9715) +- Dependency: Bump golang.org/x/text from 0.13.0 to 0.14.0 (#9680) +- Dependency: Bump golangci-lint to v1.55.2 (#9740) +- Dependency: Bump google.golang.org/api from 0.149.0 to 0.150.0 in /hack/tools (#9691) +- Dependency: Bump opentelemetry to fix CVEs (#9709) +- Dependency: Bump the kubernetes group with 4 updates (#9739) +- Dependency: Bump Trivy to v0.47.0 (#9671) +- Devtools: Allow non-provider Deployments in Tilt (#9404) +- Documentation: Add k3s providers to cluster-api book (#9577) +- Documentation: Fix typo in 'ginkgo' in Makefile (#9654) +- e2e: Add MP timeouts to cluster upgrade tests (#9723) +- e2e: Improve logging for self-hosted e2e test (#9637) +- e2e: Test/conformance: add variables to overwrite parallelization of kubetest runs (#9667) +- IPAM: Add age also to v1beta1 IPAM types (#9729) +- IPAM: Enforce IPAM import alias (#9730) +- KCP: Support admin config for Kubeadm v1.29 (#9682) +- Release: Add adilGhaffarDev to release-team owner alias (#9718) +- Release: Avoid cd'ing into test folders when building Docker images (#9744) +- Release: Bump kubebuilder tools to v0.4.2 (#9665) +- Release: Give permissions for release notes tool to release team (#9563) +- Release: Hack: add weekly update script for Slack (#9343) +- Testing: Add MachinePools to Runtime SDK and Rollout tests (#9703) +- Testing: Add WaitForMachinePoolToBeUpgraded to self-hosted test (#9540) +- Testing: Follow-up fixes: Add MachinePools to Runtime SDK and Rollout tests (#9719) + +:book: Additionally, there have been 3 contributions to our documentation and book. (#9559, #9635, #9706) + +
+More details about the release + +:warning: **RELEASE CANDIDATE NOTES** :warning: + +## 👌 Kubernetes version support + +- Management Cluster: v1.25.x -> v1.28.x +- Workload Cluster: v1.23.x -> v1.28.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.5.0 +## :chart_with_upwards_trend: Overview +- 382 new commits merged +- 6 breaking changes :warning: +- 15 feature additions ✨ +- 34 bugs fixed 🐛 + +## :memo: Proposals +- Community meeting: Add proposal for karpenter integration feature group (#9571) +- Documentation: Flexible Managed Kubernetes Endpoints (#8500) + +## :warning: Breaking Changes +- API: Remove v1alpha3 API Version (#8997) +- API: Stop serving v1alpha4 API Versions (#8996) +- clusterctl: Improve Context handling in clusterctl (#8939) +- Dependency: Bump to controller-runtime v0.16 (#8999) +- Metrics/Logging: Implement secure diagnostics (metrics, pprof, log level changes) (#9264) +- util: Remove go-vcs dependency from releaselink tool (#9288) + +## :sparkles: New Features +- API: Add validation to nested ObjectMeta fields (#8431) +- CAPD: Add config maps to CAPD RBAC (#9528) +- CAPD: Add MachinePool Machine implementation to CAPD components (#8842) +- CAPD: Allow adding custom HA proxy config for CAPD load balancer (#8785) +- CAPD: Initialize configmap object before getting it (#9529) +- ClusterClass: Add topology-owned label to MachineHealthChecks. (#9191) +- ClusterClass: Introduce NamingStrategy and allow generating names using go templates (#9340) +- ClusterClass: Update API with ClusterClass MachinePool support (#8820) +- clusterctl: Add RKE2 bootstrap provider to clusterctl (#9720) +- clusterctl: Block move with annotation (#8690) +- IPAM: Promote IPAM types to v1beta1 (#9525) +- MachinePool: Add MachinePool workers support in ClusterClass (#9016) +- MachineSet: Adjust preflight check to allow kubelet version skew of 3 for clusters running v1.28 and above (#9222) +- Release: Add automation to create release branch and tags (#9111) +- Testing/Documentation: V1.28: Prepare quickstart, capd and tests for the new release including kind bump (#9160) + +## :bug: Bug Fixes +- CAPBK: Certificate paths in cloud-init scripts should not use a platform-dependent path separator (#9167) +- CAPD: Delete container after failed start to work around port allocation issues (#9125) +- CAPD: Fix DockerMachine panic (#9673) +- CI: Fix reporting bug in verify-container-image script (#9676) +- ClusterClass: Fix ClusterClass enqueue for ExtensionConfig (#9133) +- ClusterClass: Topology: fix namingstrategy webhook to not use uppercase characters for testing the template and align unit test to e2e test (#9425) +- clusterctl: Fix provider namespace secret not included in clusterctl move (#9694) +- Dependency: Bump golang.org/x/net to v0.13.0 (#9121) +- Dependency: Bump to docker v24.0.5-0.20230714235725-36e9e796c6fc (#9038) +- Devtools: Adding metrics container port in tilt-prepare only if it's missing (#9308) +- Devtools: Allow duplicate objects in Tiltfile (#9302) +- Devtools: Change tilt debug base image to golang (#9070) +- Devtools: Fix tilt-prepare leader-elect setting (#9315) +- Devtools: Pin Plantuml version (#9424) +- Devtools: Tilt: ensure .tiltbuild/bin directory is created early enough, add tilt troubleshooting guide (#9165) +- Documentation: Fix doctoc detection in verify-doctoc.sh (#9112) +- e2e: Drop MachinePools from Dualstack tests (#9477) +- e2e: Fix autoscaler image repo (#9353) +-
e2e: Test: pin conformance image to a version which includes a fix for the dualstack tests (#9252) +- KCP: Allow dropping patches KubeadmControlPlane KubeadmConfig (#9698) +- KCP: Allow to drop useExperimentalRetryJoin field from KubeadmControlPlane.kubeadmConfigSpec (#9170) +- KCP: Fix KCP Controller reconcile always return error when workload cluster is unreachable (#9342) +- KCP: Requeue KCP object if ControlPlaneComponentsHealthyCondition is not yet true (#9032) +- Machine: Retry Node delete when CCT is locked (#9570) +- MachineDeployment: MD controller: use regular random suffix for MachineSets, ensure max length 63 (#9298) +- MachineHealthCheck: Fix excessive trace logging in the machine health check controller (#9419) +- MachinePool: Fix bug where MachinePool Machine ownerRefs weren't updating (#9619) +- Release: Alphabetically sorting release tool output (#9055) +- Release: Deduplicating area in pr title in release notes (#9186) +- Release: Fix go install path for kpromo v4.0.4 (#9336) +- Release: Generate warning when release notes can not be generated (#9163) +- Release: Hack/release-notes: ensure release notes tool can be used for external projects again (#9018) +- Release: Reverts pull request from cahillsf/improve-release-speed (#9465) +- util: Fix AddAnnotations for unstructured.Unstructured (#9164) + +## :seedling: Others +- API: Add ClusterClass column to Cluster CRD (#9120) +- API: Add verify-import-restrictions to enforce import restrictions (#9407) +- API: Enforce import restrictions in all API packages (#9461) +- API: Ensure we generate all webhook manifests.yaml (#9621) +- API: Move API v1beta1 webhooks to a separate package (#9047) +- API: Move docker infrastructure experimental API v1beta1 webhooks to sepa… (#9460) +- API: Move docker infrastructure API v1beta1 webhooks to separate package (#9458) +- API: Move experimental addons API v1beta1 webhooks to separate package (#9438) +- API: Move experimental API v1beta1 webhooks to separate package (#9417) +- API: Move inmemory infrastructure API v1beta1 webhooks to separate package (#9459) +- API: Move Kubeadm API v1beta1 webhooks to separate package (#9410) +- API: Remove files and markers for Kubebuilder (#9344) +- API: Remove reliance on controller-runtime scheme builder (#9045) +- API: Remove reliance on controller-runtime scheme builder for experimental APIs (#9185) +- API: Remove reliance on controller-runtime scheme builder for remaining API groups (#9266) +- API: Remove the dependency on cluster-api/utils from addons API (#9482) +- API: Test and document controller ownerReferences (#9153) +- CAPBK: Remove Kubeadm upstream v1beta1 types (#9345) +- CAPD: Fix multi error handling in RunContainer (#9139) +- CAPD: Set Condition, if creating external LB failed.
(#9697) +- CI: Add colored-line-number output for golangci-lint action (#9147) +- CI: Add dependabot for test and hack/tools module (#9041) +- CI: Add exclude for Kustomize API to dependabot config (#9059) +- CI: Add licence-scan for pull requests (#9184) +- CI: Add loggercheck linter and fix findings (#9446) +- CI: Add verify-govulncheck and verify-vulnerabilities targets and integrate to scan action (#9144) +- CI: Bump actions/cache from 3.3.1 to 3.3.2 (#9395) +- CI: Bump actions/checkout from 4.1.0 to 4.1.1 (#9611) +- CI: Bump actions/github-script from 7.0.0 to 7.0.1 (#9738) +- CI: Bump actions/setup-go from 4.0.1 to 4.1.0 (#9187) +- CI: Bump apidiff to v0.7.0 (#9472) +- CI: Bump golangci-lint to v1.54.1 (#9174) +- CI: Bump golangci/golangci-lint-action from 3.6.0 to 3.7.0 (#9261) +- CI: Bump tj-actions/changed-files from 40.1.0 to 40.1.1 (#9714) +- CI: Bump Trivy to v0.45.1 (#9445) +- CI: Fix .golangci.yml comments (#9499) +- CI: Ginkgolinter: forbid focus container (#9320) +- CI: Github: add edited and reopened as triggers for the GH workflow approval (#9259) +- CI: Github: add workflow to auto-approve golangci-lint if ok-to-test label is set (#9244) +- CI: Go.mod reformat to have only two require blocks (#9192) +- CI: Golangci-lint: replace deprecated local-prefixes setting for gci (#9339) +- CI: Make GO_ARCH explicit in verify_container script (#9341) +- CI: Update actions for 1.5 and make names consistent (#9115) +- CI: Verify plantuml image generation in CI (#9363) +- ClusterCacheTracker: Add separate concurrency flag for cluster cache tracker (#9116) +- ClusterCacheTracker: Ensure Get/List calls are not getting stuck when apiserver is unreachable (#9028) +- ClusterCacheTracker: Fix accessor deletion on health check failure (#9025) +- ClusterClass: Add additional test cases for nested variable defaulting (#9728) +- ClusterClass: Add ownerRefs to BootstrapConfig/InfraMachinePool in classy Clusters (#9389) +- ClusterClass: Embed ssa.FilterObjectInput into HelperOption to remove duplication (#9512) +- ClusterClass: Fix some nits in Cluster topology engine tests (#9464) +- ClusterClass: Improve Cluster variable defaulting/validation errors (#9452) +- ClusterClass: Improve message for TopologyReconciledCondition (#9400) +- ClusterClass: Make ClusterClass generated object names consistent (#9254) +- ClusterClass: Minor fixes for CC+MP implementation (#9318) +- clusterctl: Check resource blocking clusterctl move during discovery (#9246) +- clusterctl: Propagate ctx to retryWithExponentialBackoff in clusterctl (#9437) +- clusterctl: Use goproxy to check version in clusterctl (#9237) +- clusterctl: Use http get to download files from GitHub in clusterctl (#9236) +- Dependency: Bump cert-manager to v1.13.2 (#9653) +- Dependency: Bump cloud.google.com/go/storage from 1.34.1 to 1.35.1 in /hack/tools (#9727) +- Dependency: Bump controller tools to v1.13.0 (#9221) +- Dependency: Bump controller-runtime to v0.16.3 (#9592) +- Dependency: Bump conversion-gen to v0.28 (#9267) +- Dependency: Bump corefile-migration library to v1.0.21 (#9307) +- Dependency: Bump docker to v24.0.5 (#9064) +- Dependency: Bump envtest binaries to 1.28 (#9268) +- Dependency: Bump github.com/blang/semver to v4 (#9189) +- Dependency: Bump github.com/docker/distribution (#9544) +- Dependency: Bump github.com/docker/docker from 24.0.6+incompatible to 24.0.7+incompatible in /test (#9652) +- Dependency: Bump github.com/emicklei/go-restful/v3 from 3.10.2 to 3.11.0 in /test (#9272) +- Dependency: Bump 
github.com/evanphx/json-patch/v5 from 5.6.0 to 5.7.0 (#9397) +- Dependency: Bump github.com/fatih/color from 1.15.0 to 1.16.0 (#9681) +- Dependency: Bump github.com/go-logr/logr from 1.2.4 to 1.3.0 (#9644) +- Dependency: Bump github.com/google/go-cmp from 0.5.9 to 0.6.0 (#9562) +- Dependency: Bump github.com/onsi/ginkgo/v2 from 2.13.0 to 2.13.1 (#9716) +- Dependency: Bump github.com/onsi/gomega from 1.29.0 to 1.30.0 (#9717) +- Dependency: Bump github.com/prometheus/client_golang from 1.16.0 to 1.17.0 (#9517) +- Dependency: Bump github.com/spf13/cobra from 1.7.0 to 1.8.0 (#9679) +- Dependency: Bump github.com/spf13/viper from 1.16.0 to 1.17.0 (#9561) +- Dependency: Bump Go to v1.20.10 (#9551) +- Dependency: Bump Go version to v1.20.11 (#9683) +- Dependency: Bump go-github dependency to version v53 (#8995) +- Dependency: Bump go.etcd.io/etcd/api/v3 from 3.5.9 to 3.5.10 (#9642) +- Dependency: Bump go.etcd.io/etcd/client/v3 from 3.5.9 to 3.5.10 (#9646) +- Dependency: Bump go.opentelemetry.io/* dependencies (#9598) +- Dependency: Bump golang.org/grpc to v1.59.0 (#9626) +- Dependency: Bump golang.org/x/net from 0.15.0 to 0.17.0 in /test (#9537) +- Dependency: Bump golang.org/x/oauth2 from 0.13.0 to 0.14.0 (#9715) +- Dependency: Bump golang.org/x/text from 0.13.0 to 0.14.0 (#9680) +- Dependency: Bump golangci-lint to v1.55.2 (#9740) +- Dependency: Bump gomodules.xyz/jsonpatch/v2 from 2.3.0 to 2.4.0 (#9188) +- Dependency: Bump google.golang.org/api from 0.149.0 to 0.150.0 in /hack/tools (#9691) +- Dependency: Bump google.golang.org/grpc from 1.58.2 to 1.58.3 (#9607) +- Dependency: Bump kpromo to v4.0.4 (#9241) +- Dependency: Bump opentelemetry to fix CVEs (#9709) +- Dependency: Bump sigs.k8s.io/yaml from 1.3.0 to 1.4.0 (#9645) +- Dependency: Bump some dependencies in Makefile (#9549) +- Dependency: Bump the kubernetes group in /hack/tools with 2 updates (#9420) +- Dependency: Bump the kubernetes group in /hack/tools with 2 updates (#9574) +- Dependency: Bump the kubernetes group with 2 updates (#9606) +- Dependency: Bump the kubernetes group with 4 updates (#9326) +- Dependency: Bump the kubernetes group with 4 updates (#9455) +- Dependency: Bump the kubernetes group with 4 updates (#9739) +- Dependency: Bump to Go 1.20.8 (#9381) +- Dependency: Bump Trivy to v0.47.0 (#9671) +- Dependency: Replace hashicorp/go-multierror with kerrors (#9175) +- Dependency: Update ensure-kubectl.sh to 1.28 (#9275) +- Devtools: Add .PHONY for doctoc target (#9148) +- Devtools: Allow non-provider Deployments in Tilt (#9404) +- Devtools: Bump CAPI visualizer to v1.2.0 (#9195) +- Devtools: Drop duplicate pprof and unused linter excludes (#9156) +- Devtools: Improve Performance dashboard (#9387) +- Devtools: Make dev cluster networking configurable (#9183) +- Devtools: Makefile: run doctoc only once (#9182) +- Devtools: Move tilt-prepare and log-push to tools/internal (#9020) +- Devtools: Observability: move metrics to config and use sidecar in kube-state-metrics (#9390) +- Devtools: Refactor docker-push* Makefile targets so users can control with ALL_DOCKER_BUILD which images are pushed (#8586) +- Devtools: Tiltfile: rename deploy_kustomizations to additional_kustomizations (#9439) +- Documentation: Add k3s providers to cluster-api book (#9577) +- Documentation: Add more links to release-tasks doc (#9029) +- Documentation: Extend docs for patch.NewHelper (#9001) +- Documentation: Fix typo in 'ginkgo' in Makefile (#9654) +- Documentation: Fixed grammatically incorrect plurals in release tools (#9024) +- Documentation: 
Promote chrischdi to cluster-api reviewer (#9286) +- Documentation: Use official plantuml image for diagrams (#9328) +- e2e: Add back flavor to Waiter interface (#9166) +- e2e: Add CRS re-reconcile to ownerReference test (#9296) +- e2e: Add log level for kube components patch to ClusterClass (#9493) +- e2e: Add MachinePools to Topology Quickstart E2E Templates (#9393) +- e2e: Add MP timeouts to cluster upgrade tests (#9723) +- e2e: Add test for ownerReference apiVersion update (#9269) +- e2e: Add test for scale testing machinery (#9510) +- e2e: Bump autoscaler to v1.28.0 (#9349) +- e2e: Drop PR-Informing test tag and job description (#9362) +- e2e: Dump all pods in e2e test clusters (#9441) +- e2e: Dump all resource information for self-hosted tests (#9547) +- e2e: Ensure finalizers are resilient on reconciliation (#9471) +- e2e: Fail tests if test env version check fails (#9388) +- e2e: Fix broken e2e test clusterclass (#9506) +- e2e: Improve labels/annotations in CAPD test ClusterClass (#9469) +- e2e: Improve logging for self-hosted e2e test (#9637) +- e2e: Refactor e2e ownerRef test utils (#9313) +- e2e: Test/conformance: add variables to overwrite parallelization of kubetest runs (#9667) +- e2e: Test/e2e: structure resources by namespace/kind again (#9462) +- e2e: Use existing value of `SKIP_RESOURCE_CLEANUP` if set in environment (#9152) +- IPAM: Add age also to v1beta1 IPAM types (#9729) +- IPAM: Add age column to kubectl output (#9521) +- IPAM: Enforce IPAM import alias (#9730) +- KCP: Controlplane: add a test case for syncMachines where the InfraMachine does not exist. (#8992) +- KCP: Remove disableInPlacePropagation field in KCP controller (#9099) +- KCP: Remove redundant GetRESTConfig in KCP Management.GetWorkloadCluster (#9448) +- KCP: Support admin config for Kubeadm v1.29 (#9682) +- Logging: Change default log level to 2 (#9093) +- Logging: Fix patch errors not being logged (#9224) +- Logging: Set controller-runtime logger for clusterctl (#9107) +- MachinePool: Add MachinePool Builders (#9346) +- MachinePool: Add util function to get MachinePool by label (#9219) +- Metrics: Hack/observability: add capi_machine_status_certificatesexpirydate metric (#9084) +- Metrics: Hack: bump kube-state-metrics and prometheus charts (#9352) +- Release: Add additional blocks to release note generation (#9247) +- Release: Add adilGhaffarDev to release-team owner alias (#9718) +- Release: Add Release Team OWNERS file to docs/release folder (#9294) +- Release: Avoid cd'ing into test folders when building Docker images (#9744) +- Release: Bump kubebuilder tools to v0.4.2 (#9665) +- Release: Bump PR title checker github action to v0.4.1 (#9650) +- Release: Capitalize area prefix in release notes by default (#9614) +- Release: Capitalized title in release notes (#9086) +- Release: Clarify release team vs k8s/k8s-SIGs org membership (#9089) +- Release: Exclude release trigger PRs from release notes (#9444) +- Release: Format Community meeting area in release notes (#9648) +- Release: Format MachineHealthCheck area in release notes (#9500) +- Release: Give permissions for release notes tool to release team (#9563) +- Release: Hack: add weekly update script for Slack (#9343) +- Release: Improve multiple areas PRs with user friendly subs (#9071) +- Release: Improve release notes formatting (#9337) +- Release: Improve release speed (#9392) +- Release: Improve release staging build speed (#9536) +- Release: Prepare main branch for v1.6 development (#9097) +- Release: Remove auto-labelling for clusterctl 
(#8990) +- Release: Remove extra separator after title in release notes (#9605) +- Release: Update instructions checklist to generate release notes (#9443) +- Release: Update kubebuilder-release-tools to v0.4.0 (#9531) +- Release: Update release-notes make target + corresponding doc (#9573) +- Runtime SDK: Improve ClusterClass watch for ExtensionConfigs (#9338) +- Testing: Add MachinePool test cases (#9474) +- Testing: Add MachinePool test cases to engine tests (#9373) +- Testing: Add MachinePool test cases to variables tests (#9372) +- Testing: Add MachinePools to Runtime SDK and Rollout tests (#9703) +- Testing: Add MachinePools to topology upgrade test (#9502) +- Testing: Add test for required properties in clusterclass variables (#9113) +- Testing: Add unit tests for CC MP blueprint, current_state, & desired_state (#9348) +- Testing: Add unit tests for reconcile_state, cluster_controller, & conditions (#9380) +- Testing: Add WaitForMachinePoolToBeUpgraded to self-hosted test (#9540) +- Testing: Extend test/framework to collect workload cluster nodes (#9416) +- Testing: Follow-up fixes: Add MachinePools to Runtime SDK and Rollout tests (#9719) +- Testing: Replacing gomega.Equal with gomega.BeComparableTo (#9015) +- util: Adjust naming in SortForCreate implementation (#9311) +- util: Move `internal.labels` to `format` package for use by providers (#9002) +- util: Refactor SortForCreate to use sort.Slice (#9251) +- util: Remove previously deprecated code (#9136) + +:book: Additionally, there have been 46 contributions to our documentation and book. (#8260, #8678, #8819, #8988, #9013, #9014, #9080, #9081, #9087, #9119, #9141, #9146, #9150, #9161, #9173, #9208, #9209, #9213, #9214, #9232, #9270, #9291, #9305, #9364, #9386, #9403, #9415, #9429, #9433, #9463, #9487, #9488, #9490, #9511, #9513, #9514, #9527, #9550, #9559, #9565, #9572, #9590, #9593, #9613, #9635, #9706) +
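As a closing illustration of the NamingStrategy feature listed in the New Features section above (#9340): it lets a ClusterClass author control how topology-generated object names are built from a go template. The sketch below is hedged, not taken from the release itself; the class and ref names are invented, and `.cluster.name` / `.random` are the template variables the feature is documented around:

```yaml
apiVersion: cluster.x-k8s.io/v1beta1
kind: ClusterClass
metadata:
  name: example-clusterclass        # illustrative name
spec:
  controlPlane:
    ref:
      apiVersion: controlplane.cluster.x-k8s.io/v1beta1
      kind: KubeadmControlPlaneTemplate
      name: example-control-plane   # illustrative name
    # namingStrategy (#9340): derive the generated KubeadmControlPlane name
    # from a go template instead of the default naming scheme.
    namingStrategy:
      template: "{{ .cluster.name }}-cp-{{ .random }}"
```

Generated names must still be valid Kubernetes object names, so templates should stay within RFC 1123 label constraints.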
+
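Similarly, the IPAM promotion above (#9525) means claims are now written against `ipam.cluster.x-k8s.io/v1beta1`. A minimal sketch of an IPAddressClaim at the graduated version; the pool kind and all names are assumptions, since concrete pool types come from whichever IPAM provider is installed:

```yaml
apiVersion: ipam.cluster.x-k8s.io/v1beta1   # graduated to v1beta1 in this release
kind: IPAddressClaim
metadata:
  name: example-claim                 # illustrative name
  namespace: default
spec:
  poolRef:
    apiGroup: ipam.cluster.x-k8s.io   # assumed; depends on the IPAM provider
    kind: InClusterIPPool             # assumed pool kind; provider-specific
    name: example-pool                # illustrative name
```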
+ +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.6.0-rc.1.md b/CHANGELOG/v1.6.0-rc.1.md new file mode 100644 index 000000000000..32b10b8b4411 --- /dev/null +++ b/CHANGELOG/v1.6.0-rc.1.md @@ -0,0 +1,314 @@ +🚨 This is a RELEASE CANDIDATE. Use it only for testing purposes. If you find any bugs, file an [issue](https://github.com/kubernetes-sigs/cluster-api/issues/new). + +## Highlights + +* MachinePools are now supported in ClusterClass Clusters +* Metrics, profiling and other diagnostics are now served securely by default +* Types in `ipam.cluster.x-k8s.io` have graduated to `v1beta1` + +## Deprecation Warning + +- The API version `v1alpha4` is no longer served in this release. + - [Version migration guide](https://main.cluster-api.sigs.k8s.io/developer/providers/version-migration.html) + - [GitHub issue #8038](https://github.com/kubernetes-sigs/cluster-api/issues/8038) + - [API deprecation details](https://main.cluster-api.sigs.k8s.io/contributing#removal-of-v1alpha3--v1alpha4-apiversions) +- The API version `v1alpha3` has been completely removed in this release. +- Flag `--metrics-bind-addr` is [deprecated](https://github.com/kubernetes-sigs/cluster-api/pull/9264) for all controllers + +## Changes since v1.6.0-rc.0 +## :chart_with_upwards_trend: Overview +- 2 new commits merged +- 1 bug fixed 🐛 + +## :bug: Bug Fixes +- ClusterResourceSet: Requeue after 1 minute if ErrClusterLocked got hit (#9788) + +## :seedling: Others +- e2e: Improve output of exec.KubectlApply (#9761) + +
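Since the Deprecation Warning above retires `--metrics-bind-addr`, a hedged sketch of the replacement wiring may help: per #9264, controllers serve metrics, pprof, and log-level endpoints on a single secured diagnostics address. The flag names below are assumptions based on that PR and should be verified against the controller's `--help`; the patch itself is illustrative:

```yaml
# Kustomize-style patch for a provider Deployment (illustrative only).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: capi-controller-manager
  namespace: capi-system
spec:
  template:
    spec:
      containers:
        - name: manager
          args:
            # Serves metrics, pprof, and log-level endpoints with authn/authz.
            - --diagnostics-address=:8443
            # Uncomment to fall back to insecure serving (not recommended):
            # - --insecure-diagnostics
            # --metrics-bind-addr is deprecated in favor of the above.
```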
+More details about the release + +:warning: **RELEASE CANDIDATE NOTES** :warning: + +## 👌 Kubernetes version support + +- Management Cluster: v1.25.x -> v1.28.x +- Workload Cluster: v1.23.x -> v1.28.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.5.0 +## :chart_with_upwards_trend: Overview +- 387 new commits merged +- 6 breaking changes :warning: +- 15 feature additions ✨ +- 35 bugs fixed 🐛 + +## :memo: Proposals +- Community meeting: Add proposal for karpenter integration feature group (#9571) + +## :warning: Breaking Changes +- API: Remove v1alpha3 API Version (#8997) +- API: Stop serving v1alpha4 API Versions (#8996) +- clusterctl: Improve Context handling in clusterctl (#8939) +- Dependency: Bump to controller-runtime v0.16 (#8999) +- Metrics/Logging: Implement secure diagnostics (metrics, pprof, log level changes) (#9264) +- util: Remove go-vcs dependency from releaselink tool (#9288) + +## :sparkles: New Features +- API: Add validation to nested ObjectMeta fields (#8431) +- CAPD: Add config maps to CAPD RBAC (#9528) +- CAPD: Add MachinePool Machine implementation to CAPD components (#8842) +- CAPD: Allow adding custom HA proxy config for CAPD load balancer (#8785) +- CAPD: Initialize configmap object before getting it (#9529) +- ClusterClass: Add topology-owned label to MachineHealthChecks. (#9191) +- ClusterClass: Introduce NamingStrategy and allow generating names using go templates (#9340) +- ClusterClass: Update API with ClusterClass MachinePool support (#8820) +- clusterctl: Add RKE2 bootstrap provider to clusterctl (#9720) +- clusterctl: Block move with annotation (#8690) +- IPAM: Promote IPAM types to v1beta1 (#9525) +- MachinePool: Add MachinePool workers support in ClusterClass (#9016) +- MachineSet: Adjust preflight check to allow kubelet version skew of 3 for clusters running v1.28 and above (#9222) +- Release: Add automation to create release branch and tags (#9111) +- Testing/Documentation: V1.28: Prepare quickstart, capd and tests for the new release including kind bump (#9160) + +## :bug: Bug Fixes +- CAPBK: Certificate paths in cloud-init scripts should not use a platform-dependent path separator (#9167) +- CAPD: Delete container after failed start to work around port allocation issues (#9125) +- CAPD: Fix DockerMachine panic (#9673) +- CI: Fix reporting bug in verify-container-image script (#9676) +- ClusterClass: Fix ClusterClass enqueue for ExtensionConfig (#9133) +- ClusterClass: Topology: fix namingstrategy webhook to not use uppercase characters for testing the template and align unit test to e2e test (#9425) +- clusterctl: Fix provider namespace secret not included in clusterctl move (#9694) +- ClusterResourceSet: Requeue after 1 minute if ErrClusterLocked got hit (#9788) +- Dependency: Bump golang.org/x/net to v0.13.0 (#9121) +- Dependency: Bump to docker v24.0.5-0.20230714235725-36e9e796c6fc (#9038) +- Devtools: Adding metrics container port in tilt-prepare only if it's missing (#9308) +- Devtools: Allow duplicate objects in Tiltfile (#9302) +- Devtools: Change tilt debug base image to golang (#9070) +- Devtools: Fix tilt-prepare leader-elect setting (#9315) +- Devtools: Pin Plantuml version (#9424) +- Devtools: Tilt: ensure .tiltbuild/bin directory is created early enough, add tilt troubleshooting guide (#9165) +- e2e: Drop MachinePools from Dualstack tests (#9477) +- e2e: Fix autoscaler image repo (#9353) +- e2e: Test: pin conformance image to a version which 
includes a fix for the dualstack tests (#9252) +- KCP: Allow dropping patches KubeadmControlPlane KubeadmConfig (#9698) +- KCP: Allow to drop useExperimentalRetryJoin field from KubeadmControlPlane.kubeadmConfigSpec (#9170) +- KCP: Fix KCP Controller reconcile always return error when workload cluster is unreachable (#9342) +- KCP: Requeue KCP object if ControlPlaneComponentsHealthyCondition is not yet true (#9032) +- Machine: Retry Node delete when CCT is locked (#9570) +- MachineDeployment: MD controller: use regular random suffix for MachineSets, ensure max length 63 (#9298) +- MachineHealthCheck: Fix excessive trace logging in the machine health check controller (#9419) +- MachinePool: Fix bug where MachinePool Machine ownerRefs weren't updating (#9619) +- Release: Alphabetically sorting release tool output (#9055) +- Release: Deduplicating area in pr title in release notes (#9186) +- Release: Fix go install path for kpromo v4.0.4 (#9336) +- Release: Generate warning when release notes can not be generated (#9163) +- Release: Hack/release-notes: ensure release notes tool can be used for external projects again (#9018) +- Release: Reverts pull request from cahillsf/improve-release-speed (#9465) +- Runtime SDK: Set User Agent for test extension correctly (#9748) +- util: Fix AddAnnotations for unstructured.Unstructured (#9164) + +## :seedling: Others +- API: Add ClusterClass column to Cluster CRD (#9120) +- API: Add verify-import-restrictions to enforce import restrictions (#9407) +- API: Enforce import restrictions in all API packages (#9461) +- API: Ensure we generate all webhook manifests.yaml (#9621) +- API: Move API v1beta1 webhooks to a separate package (#9047) +- API: Move docker infrastructure API v1beta1 webhooks to separate package (#9458) +- API: Move docker infrastructure experimental API v1beta1 webhooks to separate package (#9460) +- API: Move experimental addons API v1beta1 webhooks to separate package (#9438) +- API: Move experimental API v1beta1 webhooks to separate package (#9417) +- API: Move inmemory infrastructure API v1beta1 webhooks to separate package (#9459) +- API: Move Kubeadm API v1beta1 webhooks to separate package (#9410) +- API: Remove files and markers for Kubebuilder (#9344) +- API: Remove reliance on controller-runtime scheme builder (#9045) +- API: Remove reliance on controller-runtime scheme builder for experimental APIs (#9185) +- API: Remove reliance on controller-runtime scheme builder for remaining API groups (#9266) +- API: Remove the dependency on cluster-api/utils from addons API (#9482) +- API: Test and document controller ownerReferences (#9153) +- CAPBK: Remove Kubeadm upstream v1beta1 types (#9345) +- CAPD: Fix multi error handling in RunContainer (#9139) +- CAPD: Set Condition, if creating external LB failed. 
(#9697) +- CI: Add colored-line-number output for golangci-lint action (#9147) +- CI: Add dependabot for test and hack/tools module (#9041) +- CI: Add exclude for Kustomize API to dependabot config (#9059) +- CI: Add licence-scan for pull requests (#9184) +- CI: Add loggercheck linter and fix findings (#9446) +- CI: Add verify-govulncheck and verify-vulnerabilities targets and integrate to scan action (#9144) +- CI: Bump actions/cache from 3.3.1 to 3.3.2 (#9395) +- CI: Bump actions/checkout from 4.1.0 to 4.1.1 (#9611) +- CI: Bump actions/github-script from 7.0.0 to 7.0.1 (#9738) +- CI: Bump actions/setup-go from 4.0.1 to 4.1.0 (#9187) +- CI: Bump apidiff to v0.7.0 (#9472) +- CI: Bump golangci-lint to v1.54.1 (#9174) +- CI: Bump golangci/golangci-lint-action from 3.6.0 to 3.7.0 (#9261) +- CI: Bump tj-actions/changed-files from 40.1.0 to 40.1.1 (#9714) +- CI: Bump Trivy to v0.45.1 (#9445) +- CI: Fix .golangci.yml comments (#9499) +- CI: Ginkgolinter: forbid focus container (#9320) +- CI: Github: add edited and reopened as triggers for the GH workflow approval (#9259) +- CI: Github: add workflow to auto-approve golangci-lint if ok-to-test label is set (#9244) +- CI: Go.mod reformat to have only two require blocks (#9192) +- CI: Golangci-lint: replace deprecated local-prefixes setting for gci (#9339) +- CI: Make GO_ARCH explicit in verify_container script (#9341) +- CI: Update actions for 1.5 and make names consistent (#9115) +- CI: Verify plantuml image generation in CI (#9363) +- ClusterCacheTracker: Add separate concurrency flag for cluster cache tracker (#9116) +- ClusterCacheTracker: Ensure Get/List calls are not getting stuck when apiserver is unreachable (#9028) +- ClusterCacheTracker: Fix accessor deletion on health check failure (#9025) +- ClusterClass: Add additional test cases for nested variable defaulting (#9728) +- ClusterClass: Add ownerRefs to BootstrapConfig/InfraMachinePool in classy Clusters (#9389) +- ClusterClass: Embed ssa.FilterObjectInput into HelperOption to remove duplication (#9512) +- ClusterClass: Fix some nits in Cluster topology engine tests (#9464) +- ClusterClass: Improve Cluster variable defaulting/validation errors (#9452) +- ClusterClass: Improve message for TopologyReconciledCondition (#9400) +- ClusterClass: Make ClusterClass generated object names consistent (#9254) +- ClusterClass: Minor fixes for CC+MP implementation (#9318) +- clusterctl: Check resource blocking clusterctl move during discovery (#9246) +- clusterctl: Propagate ctx to retryWithExponentialBackoff in clusterctl (#9437) +- clusterctl: Use goproxy to check version in clusterctl (#9237) +- clusterctl: Use http get to download files from GitHub in clusterctl (#9236) +- Dependency: Bump cert-manager to v1.13.2 (#9653) +- Dependency: Bump cloud.google.com/go/storage from 1.34.1 to 1.35.1 in /hack/tools (#9727) +- Dependency: Bump controller tools to v1.13.0 (#9221) +- Dependency: Bump controller-runtime to v0.16.3 (#9592) +- Dependency: Bump conversion-gen to v0.28 (#9267) +- Dependency: Bump corefile-migration library to v1.0.21 (#9307) +- Dependency: Bump docker to v24.0.5 (#9064) +- Dependency: Bump envtest binaries to 1.28 (#9268) +- Dependency: Bump github.com/blang/semver to v4 (#9189) +- Dependency: Bump github.com/docker/distribution (#9544) +- Dependency: Bump github.com/docker/docker from 24.0.6+incompatible to 24.0.7+incompatible in /test (#9652) +- Dependency: Bump github.com/emicklei/go-restful/v3 from 3.10.2 to 3.11.0 in /test (#9272) +- Dependency: Bump 
github.com/evanphx/json-patch/v5 from 5.6.0 to 5.7.0 (#9397) +- Dependency: Bump github.com/fatih/color from 1.15.0 to 1.16.0 (#9681) +- Dependency: Bump github.com/go-logr/logr from 1.2.4 to 1.3.0 (#9644) +- Dependency: Bump github.com/google/go-cmp from 0.5.9 to 0.6.0 (#9562) +- Dependency: Bump github.com/onsi/ginkgo/v2 from 2.13.0 to 2.13.1 (#9716) +- Dependency: Bump github.com/onsi/gomega from 1.29.0 to 1.30.0 (#9717) +- Dependency: Bump github.com/prometheus/client_golang from 1.16.0 to 1.17.0 (#9517) +- Dependency: Bump github.com/spf13/cobra from 1.7.0 to 1.8.0 (#9679) +- Dependency: Bump github.com/spf13/viper from 1.16.0 to 1.17.0 (#9561) +- Dependency: Bump Go version to v1.20.11 (#9683) +- Dependency: Bump go-github dependency to version v53 (#8995) +- Dependency: Bump go.etcd.io/etcd/api/v3 from 3.5.9 to 3.5.10 (#9642) +- Dependency: Bump go.etcd.io/etcd/client/v3 from 3.5.9 to 3.5.10 (#9646) +- Dependency: Bump go.opentelemetry.io/* dependencies (#9598) +- Dependency: Bump golang.org/grpc to v1.59.0 (#9626) +- Dependency: Bump golang.org/x/net from 0.15.0 to 0.17.0 in /test (#9537) +- Dependency: Bump golang.org/x/oauth2 from 0.13.0 to 0.14.0 (#9715) +- Dependency: Bump golang.org/x/text from 0.13.0 to 0.14.0 (#9680) +- Dependency: Bump golangci-lint to v1.55.2 (#9740) +- Dependency: Bump gomodules.xyz/jsonpatch/v2 from 2.3.0 to 2.4.0 (#9188) +- Dependency: Bump google.golang.org/api from 0.149.0 to 0.150.0 in /hack/tools (#9691) +- Dependency: Bump google.golang.org/grpc from 1.58.2 to 1.58.3 (#9607) +- Dependency: Bump kpromo to v4.0.4 (#9241) +- Dependency: Bump opentelemetry to fix CVEs (#9709) +- Dependency: Bump sigs.k8s.io/yaml from 1.3.0 to 1.4.0 (#9645) +- Dependency: Bump some dependencies in Makefile (#9549) +- Dependency: Bump the kubernetes group in /hack/tools with 2 updates (#9420) +- Dependency: Bump the kubernetes group in /hack/tools with 2 updates (#9574) +- Dependency: Bump the kubernetes group with 2 updates (#9606) +- Dependency: Bump the kubernetes group with 4 updates (#9326) +- Dependency: Bump the kubernetes group with 4 updates (#9455) +- Dependency: Bump the kubernetes group with 4 updates (#9739) +- Dependency: Bump Trivy to v0.47.0 (#9671) +- Dependency: Replace hashicorp/go-multierror with kerrors (#9175) +- Dependency: Update ensure-kubectl.sh to 1.28 (#9275) +- Devtools: Add .PHONY for doctoc target (#9148) +- Devtools: Allow non-provider Deployments in Tilt (#9404) +- Devtools: Bump CAPI visualizer to v1.2.0 (#9195) +- Devtools: Drop duplicate pprof and unused linter excludes (#9156) +- Devtools: Improve Performance dashboard (#9387) +- Devtools: Make dev cluster networking configurable (#9183) +- Devtools: Makefile: run doctoc only once (#9182) +- Devtools: Move tilt-prepare and log-push to tools/internal (#9020) +- Devtools: Observability: move metrics to config and use sidecar in kube-state-metrics (#9390) +- Devtools: Refactor docker-push* Makefile targets so users can control with ALL_DOCKER_BUILD which images are pushed (#8586) +- Devtools: Tiltfile: rename deploy_kustomizations to additional_kustomizations (#9439) +- e2e: Add back flavor to Waiter interface (#9166) +- e2e: Add CRS re-reconcile to ownerReference test (#9296) +- e2e: Add log level for kube components patch to ClusterClass (#9493) +- e2e: Add MachinePools to Topology Quickstart E2E Templates (#9393) +- e2e: Add MP timeouts to cluster upgrade tests (#9723) +- e2e: Add test for ownerReference apiVersion update (#9269) +- e2e: Add test for scale testing machinery (#9510) +- 
e2e: Bump autoscaler to v1.28.0 (#9349) +- e2e: Drop PR-Informing test tag and job description (#9362) +- e2e: Dump all pods in e2e test clusters (#9441) +- e2e: Dump all resource information for self-hosted tests (#9547) +- e2e: Ensure finalizers are resilient on reconciliation (#9471) +- e2e: Fail tests if test env version check fails (#9388) +- e2e: Fix broken e2e test clusterclass (#9506) +- e2e: Improve labels/annotations in CAPD test ClusterClass (#9469) +- e2e: Improve logging for self-hosted e2e test (#9637) +- e2e: Improve output of exec.KubectlApply (#9761) +- e2e: Refactor e2e ownerRef test utils (#9313) +- e2e: Test/conformance: add variables to overwrite parallelization of kubetest runs (#9667) +- e2e: Test/e2e: structure resources by namespace/kind again (#9462) +- e2e: Use existing value of `SKIP_RESOURCE_CLEANUP` if set in environment (#9152) +- IPAM: Add age also to v1beta1 IPAM types (#9729) +- IPAM: Add age column to kubectl output (#9521) +- IPAM: Enforce IPAM import alias (#9730) +- KCP: Controlplane: add a test case for syncMachines where the InfraMachine does not exist. (#8992) +- KCP: Remove disableInPlacePropagation field in KCP controller (#9099) +- KCP: Remove redundant GetRESTConfig in KCP Management.GetWorkloadCluster (#9448) +- KCP: Support admin config for Kubeadm v1.29 (#9682) +- Logging: Change default log level to 2 (#9093) +- Logging: Fix patch errors not being logged (#9224) +- Logging: Set controller-runtime logger for clusterctl (#9107) +- MachinePool: Add MachinePool Builders (#9346) +- MachinePool: Add util function to get MachinePool by label (#9219) +- Metrics: Hack/observability: add capi_machine_status_certificatesexpirydate metric (#9084) +- Metrics: Hack: bump kube-state-metrics and prometheus charts (#9352) +- Release: Add additional blocks to release note generation (#9247) +- Release: Add adilGhaffarDev to release-team owner alias (#9718) +- Release: Add Release Team OWNERS file to docs/release folder (#9294) +- Release: Avoid cd'ing into test folders when building Docker images (#9744) +- Release: Bump kubebuilder tools to v0.4.2 (#9665) +- Release: Bump PR title checker github action to v0.4.1 (#9650) +- Release: Capitalize area prefix in release notes by default (#9614) +- Release: Capitalized title in release notes (#9086) +- Release: Clarify release team vs k8s/k8s-SIGs org membership (#9089) +- Release: Exclude release trigger PRs from release notes (#9444) +- Release: Format Community meeting area in release notes (#9648) +- Release: Format MachineHealthCheck area in release notes (#9500) +- Release: Give permissions for release notes tool to release team (#9563) +- Release: Hack: add weekly update script for Slack (#9343) +- Release: Improve multiple areas PRs with user friendly subs (#9071) +- Release: Improve release notes formatting (#9337) +- Release: Improve release speed (#9392) +- Release: Improve release staging build speed (#9536) +- Release: Prepare main branch for v1.6 development (#9097) +- Release: Remove auto-labelling for clusterctl (#8990) +- Release: Remove extra separator after title in release notes (#9605) +- Release: Revert "Improve release staging build speed" (#9753) +- Release: Update instructions checklist to generate release notes (#9443) +- Release: Update kubebuilder-release-tools to v0.4.0 (#9531) +- Release: Update release-notes make target + corresponding doc (#9573) +- Runtime SDK: Improve ClusterClass watch for ExtensionConfigs (#9338) +- Testing: Add MachinePool test cases (#9474) +- Testing: Add 
MachinePool test cases to engine tests (#9373) +- Testing: Add MachinePool test cases to variables tests (#9372) +- Testing: Add MachinePools to Runtime SDK and Rollout tests (#9703) +- Testing: Add MachinePools to topology upgrade test (#9502) +- Testing: Add test for required properties in clusterclass variables (#9113) +- Testing: Add unit tests for CC MP blueprint, current_state, & desired_state (#9348) +- Testing: Add unit tests for reconcile_state, cluster_controller, & conditions (#9380) +- Testing: Add WaitForMachinePoolToBeUpgraded to self-hosted test (#9540) +- Testing: Extend test/framework to collect workload cluster nodes (#9416) +- Testing: Follow-up fixes: Add MachinePools to Runtime SDK and Rollout tests (#9719) +- Testing: Replacing gomega.Equal with gomega.BeComparableTo (#9015) +- util: Adjust naming in SortForCreate implementation (#9311) +- util: Move `internal.labels` to `format` package for use by providers (#9002) +- util: Refactor SortForCreate to use sort.Slice (#9251) +- util: Remove previously deprecated code (#9136) + +:book: Additionally, there have been 55 contributions to our documentation and book. (#8260, #8500, #8678, #8819, #8988, #9001, #9013, #9014, #9024, #9029, #9080, #9081, #9087, #9112, #9119, #9141, #9146, #9150, #9161, #9173, #9208, #9209, #9213, #9214, #9232, #9270, #9286, #9291, #9305, #9328, #9364, #9386, #9403, #9415, #9429, #9433, #9463, #9487, #9488, #9490, #9511, #9513, #9514, #9527, #9550, #9559, #9565, #9572, #9577, #9590, #9593, #9613, #9635, #9654, #9706) + +
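One of the headline changes in these notes is MachinePool support in ClusterClass (#8820, #9016). A minimal sketch of what a classy Cluster with MachinePool-backed workers could look like; every name and version here is illustrative rather than taken from the release:

```yaml
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  name: example-cluster             # illustrative name
spec:
  topology:
    class: example-clusterclass     # illustrative name
    version: v1.28.0
    workers:
      # New in v1.6: MachinePool-backed worker groups managed by the topology
      # controller, alongside the existing machineDeployments.
      machinePools:
        - class: default-worker     # must match a machinePools class in the ClusterClass
          name: mp-0
          replicas: 3
```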
+
+ +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.6.0.md b/CHANGELOG/v1.6.0.md new file mode 100644 index 000000000000..e94f62632b7f --- /dev/null +++ b/CHANGELOG/v1.6.0.md @@ -0,0 +1,295 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.25.x -> v1.28.x +- Workload Cluster: v1.23.x -> v1.28.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Highlights + +* MachinePools are now supported in ClusterClass Clusters +* Metrics, profiling and other diagnostics are now served securely by default +* Types in `ipam.cluster.x-k8s.io` have graduated to `v1beta1` + +## Deprecation Warning + +- The API version `v1alpha4` is no longer served in this release. + - [Version migration guide](https://main.cluster-api.sigs.k8s.io/developer/providers/version-migration.html) + - [GitHub issue #8038](https://github.com/kubernetes-sigs/cluster-api/issues/8038) + - [API deprecation details](https://main.cluster-api.sigs.k8s.io/contributing#removal-of-v1alpha3--v1alpha4-apiversions) +- The API version `v1alpha3` has been completely removed in this release. +- Flag `--metrics-bind-addr` is [deprecated](https://github.com/kubernetes-sigs/cluster-api/pull/9264) for all controllers + +## Changes since v1.5.0 +## :chart_with_upwards_trend: Overview +- 389 new commits merged +- 5 breaking changes :warning: +- 15 feature additions ✨ +- 35 bugs fixed 🐛 + +## :memo: Proposals +- Community meeting: Add proposal for karpenter integration feature group (#9571) + +## :warning: Breaking Changes +- API: Remove v1alpha3 API Version (#8997) +- API: Stop serving v1alpha4 API Versions (#8996) +- clusterctl: Improve Context handling in clusterctl (#8939) +- Dependency: Bump to controller-runtime v0.16 (#8999) +- Metrics/Logging: Implement secure diagnostics (metrics, pprof, log level changes) (#9264) + +## :sparkles: New Features +- API: Add validation to nested ObjectMeta fields (#8431) +- CAPD: Add config maps to CAPD RBAC (#9528) +- CAPD: Add MachinePool Machine implementation to CAPD components (#8842) +- CAPD: Allow adding custom HA proxy config for CAPD load balancer (#8785) +- CAPD: Initialize configmap object before getting it (#9529) +- ClusterClass: Add topology-owned label to MachineHealthChecks. 
(#9191) +- ClusterClass: Introduce NamingStrategy and allow generating names using go templates (#9340) +- ClusterClass: Update API with ClusterClass MachinePool support (#8820) +- clusterctl: Add RKE2 bootstrap provider to clusterctl (#9720) +- clusterctl: Block move with annotation (#8690) +- IPAM: Promote IPAM types to v1beta1 (#9525) +- MachinePool: Add MachinePool workers support in ClusterClass (#9016) +- MachineSet: Adjust preflight check to allow kubelet version skew of 3 for clusters running v1.28 and above (#9222) +- Release: Add automation to create release branch and tags (#9111) +- Testing/Documentation: v1.28: Prepare quickstart, capd and tests for the new release including kind bump (#9160) + +## :bug: Bug Fixes +- CABPK: Certificate paths in cloud-init scripts should not use a platform-dependent path separator (#9167) +- CAPD: Delete container after failed start to work around port allocation issues (#9125) +- CAPD: Fix DockerMachine panic (#9673) +- CI: Fix reporting bug in verify-container-image script (#9676) +- ClusterClass: Fix ClusterClass enqueue for ExtensionConfig (#9133) +- ClusterClass: Topology: fix namingstrategy webhook to not use uppercase characters for testing the template and align unit test to e2e test (#9425) +- clusterctl: Fix provider namespace secret not included in clusterctl move (#9694) +- ClusterResourceSet: Requeue after 1 minute if ErrClusterLocked got hit (#9788) +- Dependency: Bump golang.org/x/net to v0.13.0 (#9121) +- Dependency: Bump to docker v24.0.5-0.20230714235725-36e9e796c6fc (#9038) +- Devtools: Adding metrics container port in tilt-prepare only if it's missing (#9308) +- Devtools: Allow duplicate objects in Tiltfile (#9302) +- Devtools: Change tilt debug base image to golang (#9070) +- Devtools: Fix tilt-prepare leader-elect setting (#9315) +- Devtools: Pin Plantuml version (#9424) +- Devtools: Tilt: ensure .tiltbuild/bin directory is created early enough, add tilt troubleshooting guide (#9165) +- e2e: Drop MachinePools from Dualstack tests (#9477) +- e2e: Fix autoscaler image repo (#9353) +- e2e: Test: pin conformance image to a version which includes a fix for the dualstack tests (#9252) +- KCP: Allow dropping patches KubeadmControlPlane KubeadmConfig (#9698) +- KCP: Allow to drop useExperimentalRetryJoin field from KubeadmControlPlane.kubeadmConfigSpec (#9170) +- KCP: Fix KCP Controller reconcile always return error when workload cluster is unreachable (#9342) +- KCP: Requeue KCP object if ControlPlaneComponentsHealthyCondition is not yet true (#9032) +- Machine: Retry Node delete when CCT is locked (#9570) +- MachineDeployment: Use regular random suffix for MachineSets, ensure max length 63 (#9298) +- MachineHealthCheck: Fix excessive trace logging in the machine health check controller (#9419) +- MachinePool: Fix bug where MachinePool Machine ownerRefs weren't updating (#9619) +- Release: Alphabetically sorting release tool output (#9055) +- Release: Deduplicating area in pr title in release notes (#9186) +- Release: Fix go install path for kpromo v4.0.4 (#9336) +- Release: Generate warning when release notes can not be generated (#9163) +- Release: Hack/release-notes: ensure release notes tool can be used for external projects again (#9018) +- Release: Reverts pull request from cahillsf/improve-release-speed (#9465) +- Runtime SDK: Set User Agent for test extension correctly (#9748) +- util: Fix AddAnnotations for unstructured.Unstructured (#9164) + +## :seedling: Others +- API: Add ClusterClass column to Cluster CRD (#9120) +- 
API: Add verify-import-restrictions to enforce import restrictions (#9407) +- API: Enforce import restrictions in all API packages (#9461) +- API: Ensure we generate all webhook manifests.yaml (#9621) +- API: Move API v1beta1 webhooks to a separate package (#9047) +- API: Move docker infrastructure experimental API v1beta1 webhooks to separate package (#9460) +- API: Move docker infrastructure API v1beta1 webhooks to separate package (#9458) +- API: Move experimental addons API v1beta1 webhooks to separate package (#9438) +- API: Move experimental API v1beta1 webhooks to separate package (#9417) +- API: Move inmemory infrastructure API v1beta1 webhooks to separate package (#9459) +- API: Move Kubeadm API v1beta1 webhooks to separate package (#9410) +- API: Remove files and markers for Kubebuilder (#9344) +- API: Remove reliance on controller-runtime scheme builder (#9045) +- API: Remove reliance on controller-runtime scheme builder for experimental APIs (#9185) +- API: Remove reliance on controller-runtime scheme builder for remaining API groups (#9266) +- API: Remove the dependency on cluster-api/utils from addons API (#9482) +- API: Test and document controller ownerReferences (#9153) +- CABPK: Remove Kubeadm upstream v1beta1 types (#9345) +- CAPD: Fix multi error handling in RunContainer (#9139) +- CAPD: Set Condition, if creating external LB failed. (#9697) +- CI: Add colored-line-number output for golangci-lint action (#9147) +- CI: Add dependabot for test and hack/tools module (#9041) +- CI: Add exclude for Kustomize API to dependabot config (#9059) +- CI: Add licence-scan for pull requests (#9184) +- CI: Add loggercheck linter and fix findings (#9446) +- CI: Add verify-govulncheck and verify-vulnerabilities targets and integrate to scan action (#9144) +- CI: Bump actions/cache from 3.3.1 to 3.3.2 (#9395) +- CI: Bump actions/checkout from 4.1.0 to 4.1.1 (#9611) +- CI: Bump actions/github-script from 7.0.0 to 7.0.1 (#9738) +- CI: Bump actions/setup-go from 4.0.1 to 4.1.0 (#9187) +- CI: Bump apidiff to v0.7.0 (#9472) +- CI: Bump golangci-lint to v1.54.1 (#9174) +- CI: Bump golangci/golangci-lint-action from 3.6.0 to 3.7.0 (#9261) +- CI: Bump tj-actions/changed-files from 40.1.0 to 40.1.1 (#9714) +- CI: Fix .golangci.yml comments (#9499) +- CI: Ginkgolinter: forbid focus container (#9320) +- CI: Github: add edited and reopened as triggers for the GH workflow approval (#9259) +- CI: Github: add workflow to auto-approve golangci-lint if ok-to-test label is set (#9244) +- CI: Go.mod reformat to have only two require blocks (#9192) +- CI: Golangci-lint: replace deprecated local-prefixes setting for gci (#9339) +- CI: Make GO_ARCH explicit in verify_container script (#9341) +- CI: Update actions for 1.5 and make names consistent (#9115) +- CI: Verify plantuml image generation in CI (#9363) +- ClusterCacheTracker: Add separate concurrency flag for cluster cache tracker (#9116) +- ClusterCacheTracker: Ensure Get/List calls are not getting stuck when apiserver is unreachable (#9028) +- ClusterCacheTracker: Fix accessor deletion on health check failure (#9025) +- ClusterClass: Add additional test cases for nested variable defaulting (#9728) +- ClusterClass: Add ownerRefs to BootstrapConfig/InfraMachinePool in classy Clusters (#9389) +- ClusterClass: Embed ssa.FilterObjectInput into HelperOption to remove duplication (#9512) +- ClusterClass: Fix some nits in Cluster topology engine tests (#9464) +- ClusterClass: Improve Cluster variable defaulting/validation errors (#9452) +- ClusterClass: Improve message 
for TopologyReconciledCondition (#9400) +- ClusterClass: Make ClusterClass generated object names consistent (#9254) +- ClusterClass: Minor fixes for CC+MP implementation (#9318) +- clusterctl: Add Proxmox provider quickstart (#9812) +- clusterctl: Check resource blocking clusterctl move during discovery (#9246) +- clusterctl: Propagate ctx to retryWithExponentialBackoff in clusterctl (#9437) +- clusterctl: Use goproxy to check version in clusterctl (#9237) +- clusterctl: Use http get to download files from GitHub in clusterctl (#9236) +- Dependency: Bump cert-manager to v1.13.2 (#9653) +- Dependency: Bump cloud.google.com/go/storage from 1.34.1 to 1.35.1 in /hack/tools (#9727) +- Dependency: Bump controller tools to v1.13.0 (#9221) +- Dependency: Bump controller-runtime to v0.16.3 (#9592) +- Dependency: Bump conversion-gen to v0.28 (#9267) +- Dependency: Bump corefile-migration library to v1.0.21 (#9307) +- Dependency: Bump docker to v24.0.5 (#9064) +- Dependency: Bump envtest binaries to 1.28 (#9268) +- Dependency: Bump github.com/blang/semver to v4 (#9189) +- Dependency: Bump github.com/docker/distribution (#9544) +- Dependency: Bump github.com/docker/docker from 24.0.6+incompatible to 24.0.7+incompatible in /test (#9652) +- Dependency: Bump github.com/emicklei/go-restful/v3 from 3.10.2 to 3.11.0 in /test (#9272) +- Dependency: Bump github.com/evanphx/json-patch/v5 from 5.6.0 to 5.7.0 (#9397) +- Dependency: Bump github.com/fatih/color from 1.15.0 to 1.16.0 (#9681) +- Dependency: Bump github.com/go-logr/logr from 1.2.4 to 1.3.0 (#9644) +- Dependency: Bump github.com/google/go-cmp from 0.5.9 to 0.6.0 (#9562) +- Dependency: Bump github.com/onsi/ginkgo/v2 from 2.13.0 to 2.13.1 (#9716) +- Dependency: Bump github.com/onsi/gomega from 1.29.0 to 1.30.0 (#9717) +- Dependency: Bump github.com/prometheus/client_golang from 1.16.0 to 1.17.0 (#9517) +- Dependency: Bump github.com/spf13/cobra from 1.7.0 to 1.8.0 (#9679) +- Dependency: Bump github.com/spf13/viper from 1.16.0 to 1.17.0 (#9561) +- Dependency: Bump Go version to v1.20.11 (#9683) +- Dependency: Bump go-github dependency to version v53 (#8995) +- Dependency: Bump go.etcd.io/etcd/api/v3 from 3.5.9 to 3.5.10 (#9642) +- Dependency: Bump go.etcd.io/etcd/client/v3 from 3.5.9 to 3.5.10 (#9646) +- Dependency: Bump go.opentelemetry.io/* dependencies (#9598) +- Dependency: Bump golang.org/grpc to v1.59.0 (#9626) +- Dependency: Bump golang.org/x/net from 0.15.0 to 0.17.0 in /test (#9537) +- Dependency: Bump golang.org/x/oauth2 from 0.13.0 to 0.14.0 (#9715) +- Dependency: Bump golang.org/x/text from 0.13.0 to 0.14.0 (#9680) +- Dependency: Bump golangci-lint to v1.55.2 (#9740) +- Dependency: Bump gomodules.xyz/jsonpatch/v2 from 2.3.0 to 2.4.0 (#9188) +- Dependency: Bump google.golang.org/api from 0.149.0 to 0.150.0 in /hack/tools (#9691) +- Dependency: Bump google.golang.org/grpc from 1.58.2 to 1.58.3 (#9607) +- Dependency: Bump kpromo to v4.0.4 (#9241) +- Dependency: Bump opentelemetry to fix CVEs (#9709) +- Dependency: Bump sigs.k8s.io/yaml from 1.3.0 to 1.4.0 (#9645) +- Dependency: Bump some dependencies in Makefile (#9549) +- Dependency: Bump the kubernetes group in /hack/tools with 2 updates (#9420) +- Dependency: Bump the kubernetes group in /hack/tools with 2 updates (#9574) +- Dependency: Bump the kubernetes group with 2 updates (#9606) +- Dependency: Bump the kubernetes group with 4 updates (#9326) +- Dependency: Bump the kubernetes group with 4 updates (#9455) +- Dependency: Bump the kubernetes group with 4 updates (#9739) +- Dependency: 
Bump Trivy to v0.47.0 (#9671) +- Dependency: Replace hashicorp/go-multierror with kerrors (#9175) +- Dependency: Update ensure-kubectl.sh to 1.28 (#9275) +- Devtools: Add .PHONY for doctoc target (#9148) +- Devtools: Allow non-provider Deployments in Tilt (#9404) +- Devtools: Bump CAPI visualizer to v1.2.0 (#9195) +- Devtools: Drop duplicate pprof and unused linter excludes (#9156) +- Devtools: Improve Performance dashboard (#9387) +- Devtools: Make dev cluster networking configurable (#9183) +- Devtools: Makefile: run doctoc only once (#9182) +- Devtools: Move tilt-prepare and log-push to tools/internal (#9020) +- Devtools: Observability: move metrics to config and use sidecar in kube-state-metrics (#9390) +- Devtools: Refactor docker-push* Makefile targets so users can control with ALL_DOCKER_BUILD which images are pushed (#8586) +- Devtools: Tiltfile: rename deploy_kustomizations to additional_kustomizations (#9439) +- e2e: Add back flavor to Waiter interface (#9166) +- e2e: Add CRS re-reconcile to ownerReference test (#9296) +- e2e: Add log level for kube components patch to ClusterClass (#9493) +- e2e: Add MachinePools to Topology Quickstart E2E Templates (#9393) +- e2e: Add MP timeouts to cluster upgrade tests (#9723) +- e2e: Add test for ownerReference apiVersion update (#9269) +- e2e: Add test for scale testing machinery (#9510) +- e2e: Bump autoscaler to v1.28.0 (#9349) +- e2e: Drop PR-Informing test tag and job description (#9362) +- e2e: Dump all pods in e2e test clusters (#9441) +- e2e: Dump all resource information for self-hosted tests (#9547) +- e2e: Ensure finalizers are resilient on reconciliation (#9471) +- e2e: Fail tests if test env version check fails (#9388) +- e2e: Fix broken e2e test clusterclass (#9506) +- e2e: Improve labels/annotations in CAPD test ClusterClass (#9469) +- e2e: Improve logging for self-hosted e2e test (#9637) +- e2e: Improve output of exec.KubectlApply (#9761) +- e2e: Refactor e2e ownerRef test utils (#9313) +- e2e: Test/conformance: add variables to overwrite parallelization of kubetest runs (#9667) +- e2e: Test/e2e: structure resources by namespace/kind again (#9462) +- e2e: Use existing value of `SKIP_RESOURCE_CLEANUP` if set in environment (#9152) +- IPAM: Add age also to v1beta1 IPAM types (#9729) +- IPAM: Add age column to kubectl output (#9521) +- IPAM: Enforce IPAM import alias (#9730) +- KCP: Controlplane: add a test case for syncMachines where the InfraMachine does not exist. 
(#8992) +- KCP: Remove disableInPlacePropagation field in KCP controller (#9099) +- KCP: Remove redundant GetRESTConfig in KCP Management.GetWorkloadCluster (#9448) +- KCP: Support admin config for Kubeadm v1.29 (#9682) +- Logging: Change default log level to 2 (#9093) +- Logging: Fix patch errors not being logged (#9224) +- Logging: Set controller-runtime logger for clusterctl (#9107) +- MachinePool: Add MachinePool Builders (#9346) +- MachinePool: Add util function to get MachinePool by label (#9219) +- MachinePool: Use SSA Patch to create machines in MP controller (#9802) +- Metrics: Hack/observability: add capi_machine_status_certificatesexpirydate metric (#9084) +- Metrics: Hack: bump kube-state-metrics and prometheus charts (#9352) +- Release: Add additional blocks to release note generation (#9247) +- Release: Add adilGhaffarDev to release-team owner alias (#9718) +- Release: Add Release Team OWNERS file to docs/release folder (#9294) +- Release: Avoid cd'ing into test folders when building Docker images (#9744) +- Release: Bump kubebuilder tools to v0.4.2 (#9665) +- Release: Bump PR title checker github action to v0.4.1 (#9650) +- Release: Capitalize area prefix in release notes by default (#9614) +- Release: Capitalized title in release notes (#9086) +- Release: Clarify release team vs k8s/k8s-SIGs org membership (#9089) +- Release: Exclude release trigger PRs from release notes (#9444) +- Release: Format Community meeting area in release notes (#9648) +- Release: Format MachineHealthCheck area in release notes (#9500) +- Release: Give permissions for release notes tool to release team (#9563) +- Release: Hack: add weekly update script for Slack (#9343) +- Release: Improve multiple areas PRs with user friendly subs (#9071) +- Release: Improve release notes formatting (#9337) +- Release: Improve release speed (#9392) +- Release: Improve release staging build speed (#9536) +- Release: Prepare main branch for v1.6 development (#9097) +- Release: Remove auto-labelling for clusterctl (#8990) +- Release: Remove extra separator after title in release notes (#9605) +- Release: Revert "Improve release staging build speed" (#9753) +- Release: Update instructions checklist to generate release notes (#9443) +- Release: Update kubebuilder-release-tools to v0.4.0 (#9531) +- Release: Update release-notes make target + corresponding doc (#9573) +- Runtime SDK: Improve ClusterClass watch for ExtensionConfigs (#9338) +- Testing: Add MachinePool test cases (#9474) +- Testing: Add MachinePool test cases to engine tests (#9373) +- Testing: Add MachinePool test cases to variables tests (#9372) +- Testing: Add MachinePools to Runtime SDK and Rollout tests (#9703) +- Testing: Add MachinePools to topology upgrade test (#9502) +- Testing: Add test for required properties in clusterclass variables (#9113) +- Testing: Add unit tests for CC MP blueprint, current_state, & desired_state (#9348) +- Testing: Add unit tests for reconcile_state, cluster_controller, & conditions (#9380) +- Testing: Add WaitForMachinePoolToBeUpgraded to self-hosted test (#9540) +- Testing: Extend test/framework to collect workload cluster nodes (#9416) +- Testing: Follow-up fixes: Add MachinePools to Runtime SDK and Rollout tests (#9719) +- Testing: Replacing gomega.Equal with gomega.BeComparableTo (#9015) +- util: Adjust naming in SortForCreate implementation (#9311) +- util: Move `internal.labels` to `format` package for use by providers (#9002) +- util: Refactor SortForCreate to use sort.Slice (#9251) +- util: Remove go-vcs 
dependency from releaselink tool (#9288) +- util: Remove previously deprecated code (#9136) + +:book: Additionally, there have been 55 contributions to our documentation and book. (#8260, #8500, #8678, #8819, #8988, #9001, #9013, #9014, #9024, #9029, #9080, #9081, #9087, #9112, #9119, #9141, #9146, #9150, #9161, #9173, #9208, #9209, #9213, #9214, #9232, #9270, #9286, #9291, #9305, #9328, #9364, #9386, #9403, #9415, #9429, #9433, #9463, #9487, #9488, #9490, #9511, #9513, #9514, #9527, #9550, #9559, #9565, #9572, #9577, #9590, #9593, #9613, #9635, #9654, #9706) + + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.6.1.md b/CHANGELOG/v1.6.1.md new file mode 100644 index 000000000000..83ad84cafc17 --- /dev/null +++ b/CHANGELOG/v1.6.1.md @@ -0,0 +1,49 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.25.x -> v1.29.x +- Workload Cluster: v1.23.x -> v1.29.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Highlights + +* Kubernetes v1.29 is now supported + +## Changes since v1.6.0 +## :chart_with_upwards_trend: Overview +- 30 new commits merged +- 1 feature addition ✨ +- 9 bugs fixed 🐛 + +## :sparkles: New Features +- Control-plane: KCP: Allow mutation of all fields that should be mutable (#9884) + +## :bug: Bug Fixes +- CAPD: Fix ignition to also set the kube-proxy configuration to skip setting sysctls (#9895) +- clusterctl: Validate no objects exist from CRDs before deleting them (#9834) +- e2e: Make MachinePools and MachineDeployments optional in ApplyClusterTemplateAndWait (#9962) +- e2e: Test: Fix finalizers test to not only rely on namespaced name (#10004) +- e2e: Test: wait for topology to get rolled out before continuing with scaling checks (#9827) +- IPAM: Fix webhooks using mixed api versions (#9863) +- Logging: Improve log k/v pairs and improve/drop a few log lines (#9880) +- MachinePool: Fix TestReconcileMachinePoolScaleToFromZero flakes (#9822) +- Testing: SSA: fix flaky test TestPatch/Test patch with Machine (#9915) + +## :seedling: Others +- CI: Bump kubebuilder envtest to 1.29.0 (#10014) +- Dependency: Bump golang.org/x/crypto to v0.17.0 (#9943) +- Dependency: Bump to Go 1.20.12 (#9841) +- Dependency: Go.mod: bump github.com/cloudflare/circl to v1.3.7 (#10002) +- e2e: Add DeepCopy method for E2EConfig (#9989) +- e2e: Add test util to resolve Kubernetes versions (#9996) +- e2e: Bump cluster-autoscaler to v1.29.0 (#9948) +- e2e: Drop duplicated scale test (#9976) +- e2e: Make etcd and CoreDNS optional in upgrade and self-hosted tests (#9965) +- e2e: Test/framework add WatchDaemonSetLogsByLabelSelector method (#9994) +- Testing: add dynamic finalizer assertions for e2e framework (#10003) +- Testing: v1.29: Prepare quickstart, capd and tests for the new release including kind bump (#9890) + +:book: Additionally, there have been 8 contributions to our documentation and book. 
(#9815, #9816, #9824, #9830, #9878, #9902, #9951, #9979) + + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.6.2.md b/CHANGELOG/v1.6.2.md new file mode 100644 index 000000000000..5ab1ec8d97e7 --- /dev/null +++ b/CHANGELOG/v1.6.2.md @@ -0,0 +1,34 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.25.x -> v1.29.x +- Workload Cluster: v1.23.x -> v1.29.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Highlights +* :warning: Warning: This release fixes a bug (#10051) that was introduced in v1.6.0, which caused a regression in the conversion of v1alpha3/v1alpha4 objects. It is recommended to upgrade to v1.6.2 to avoid the issue. + +## Changes since v1.6.1 +## :chart_with_upwards_trend: Overview +- 16 new commits merged +- 3 bugs fixed 🐛 + +## :bug: Bug Fixes +- [API/e2e]: Restore v1alpha3/v1alpha4 conversion to fix SSA issue & add e2e test coverage (#10151) + - :warning: Warning: This change is a fix for the conversion bug that was introduced in v1.6.0. +- ClusterCacheTracker: Fix ClusterCacheTracker memory leak (#10064) +- Machine: Watch external objects for machine before deleting (#10177) + +## :seedling: Others +- clusterctl: Bump cert-manager to v1.14.2 (#10120) (#10127) +- clusterctl: Clarify rules for adding new clusterctl default providers (#10109) +- Community meeting: Promote chrischdi to Cluster API maintainer (#10089) +- Dependency: Bump controller runtime v0.16.5 (#10163) +- Dependency: Bump Go to 1.21.5 (#10152) +- e2e: Use manager in test extension (#10106) +- Testing: Print conformance image used in kubetest (#10081) + +:book: Additionally, there have been 4 contributions to our documentation and book. (#10024, #10047, #10105, #10116) + + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.6.3.md b/CHANGELOG/v1.6.3.md new file mode 100644 index 000000000000..eca936b7cf97 --- /dev/null +++ b/CHANGELOG/v1.6.3.md @@ -0,0 +1,39 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.25.x -> v1.29.x +- Workload Cluster: v1.23.x -> v1.29.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.6.2 +## :chart_with_upwards_trend: Overview +- 14 new commits merged +- 2 bugs fixed 🐛 + +## :bug: Bug Fixes +- CAPD: Remove hack for btrfs/zfs support (#10192) +- e2e: Test: retry GetOwnerGraph in owner references test on certificate errors (#10217) + +## :seedling: Others +- CABPK: Add pod metadata to capbk manager (#10212) +- clusterctl: Add 0 default to worker-machine-count help (#10205) +- clusterctl: Add hivelocity infra provider to clusterctl (#10168) +- Dependency: Bump go version to 1.21.8 (#10246) +- Dependency: Bump protobuf to v1.33.0 to address CVEs (#10249) +- Release: Improve release-staging build (#10190) +- Release: Read in dockerfiles from stdin (#10209) + +:book: Additionally, there have been 4 contributions to our documentation and book. 
(#10230, #10241, #10243, #10260) + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +- google.golang.org/protobuf: v1.31.0 → v1.33.0 + +### Removed +_Nothing has changed._ + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.6.4.md b/CHANGELOG/v1.6.4.md new file mode 100644 index 000000000000..fd936db59326 --- /dev/null +++ b/CHANGELOG/v1.6.4.md @@ -0,0 +1,48 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.25.x -> v1.29.x +- Workload Cluster: v1.23.x -> v1.29.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.6.3 +## :chart_with_upwards_trend: Overview +- 21 new commits merged +- 8 bugs fixed 🐛 + +## :bug: Bug Fixes +- CI: Fix for TestServerSideApplyWithDefaulting (#10327) +- ClusterClass: Improve handling of topology orphaned objects (#10326) +- e2e: Fix kubetest to allow parallel execution on different clusters (#10427) +- KCP: Delete out of date machines with unhealthy control plane component conditions when rolling out KCP (#10196) +- KCP: Skip checking `clusterConfiguration.dns` fields when KCP checking MachineNeedRollout (#10303) +- MachineSet: deletion priority to avoid deleting too many machines (#10431) +- Runtime SDK: Fix ClusterClass variables status & RuntimeExtension and add test coverage (#10341) + +## :seedling: Others +- CI: Added go directive test (#10348) +- CI: Set linkchecker base branch to release-1.6 (#10359) +- ClusterClass: Add ClusterClass variables metadata (#10320) +- ClusterClass: Implement topology validations for topology kubernetes version upgrades (#10318) +- ClusterClass: use the alias for ClusterCacheTrackerReader instead of the internal reference (#10319) +- clusterctl: Bump cert-manager to 1.14.4 (#10272) +- Dependency: Bump docker to address CVE (#10313) +- Dependency: Bump golang to v1.21.9 and golang.org/x/net to mitigate CVE-2023-45288 (#10376) + +:book: Additionally, there have been 5 contributions to our documentation and book. 
(#10295, #10338, #10394, #10404, #10416) + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +- golang.org/x/crypto: v0.17.0 → v0.21.0 +- golang.org/x/net: v0.18.0 → v0.23.0 +- golang.org/x/sys: v0.15.0 → v0.18.0 +- golang.org/x/term: v0.15.0 → v0.18.0 + +### Removed +_Nothing has changed._ + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.6.5.md b/CHANGELOG/v1.6.5.md new file mode 100644 index 000000000000..cd7305099b33 --- /dev/null +++ b/CHANGELOG/v1.6.5.md @@ -0,0 +1,34 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.25.x -> v1.29.x +- Workload Cluster: v1.23.x -> v1.29.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.6.4 +## :chart_with_upwards_trend: Overview +- 6 new commits merged +- 4 bugs fixed 🐛 + +## :bug: Bug Fixes +- CAPD: Verify lb config after writing it (#10462) +- clusterctl: Ensure cert-manager objects get applied before other provider objects (#10504) +- e2e: Kubetest: also gather junit reports in case of errors observed from ginkgo (#10495) +- e2e: Test: Ensure ownerRef assertions for all Kinds are evaluated (#10593) + +## :seedling: Others +- API: Allow users to specify webhook server cert and key names (#10582) +- clusterctl: Bump cert-manager to 1.14.5 (#10518) + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +_Nothing has changed._ + +### Removed +_Nothing has changed._ + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.7.0-beta.0.md b/CHANGELOG/v1.7.0-beta.0.md new file mode 100644 index 000000000000..017629fddd08 --- /dev/null +++ b/CHANGELOG/v1.7.0-beta.0.md @@ -0,0 +1,396 @@ +🚨 This is a BETA RELEASE. Use it only for testing purposes. If you find any bugs, file an [issue](https://github.com/kubernetes-sigs/cluster-api/issues/new). + +## Highlights + +* Enable kubeadm feature gates mutation + +## Deprecation Warning + +- The API version `v1alpha4` has been completely removed in this release. + +
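+As an illustrative sketch of the feature gate highlight above (example only; the object name is hypothetical and the manifest is trimmed to the relevant fields): kubeadm feature gates live under `spec.kubeadmConfigSpec.clusterConfiguration.featureGates` of a `KubeadmControlPlane`, and with mutation enabled this map is expected to be editable on an existing control plane instead of being rejected by the validation webhook.
+
+```yaml
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+  name: my-cluster-control-plane # hypothetical name
+spec:
+  replicas: 3
+  version: v1.29.0
+  kubeadmConfigSpec:
+    clusterConfiguration:
+      # With feature gate mutation enabled, entries in this map can be
+      # added or changed after the control plane has been created.
+      featureGates:
+        EtcdLearnerMode: true
+```
+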
+More details about the release
+
+:warning: **BETA RELEASE NOTES** :warning:
+
+## 👌 Kubernetes version support
+
+- Management Cluster: v1.26.x -> v1.29.x
+- Workload Cluster: v1.24.x -> v1.29.x
+
+[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html)
+
+
+## Changes since v1.6.0
+## :chart_with_upwards_trend: Overview
+- 264 new commits merged
+- 4 breaking changes :warning:
+- 9 feature additions ✨
+- 33 bugs fixed 🐛
+
+## :memo: Proposals
+- MachinePool: Update MachinePool Machines proposal with diagrams (#9664)
+
+## :warning: Breaking Changes
+- API: Remove v1alpha4 API Version (#9939)
+- Dependency: Bump to controller-runtime v0.17.0 & k8s.io v0.29 (#9964)
+- Logging: Make event recorder names consistent for KCP and topology controller (#9862)
+- KCP/MachineSet: Objects generated by KCP, MachineSets and MachinePools will now consistently use machine name (#9833)
+
+## :sparkles: New Features
+- API: Enable kubeadm feature gates mutation (#10154)
+- clusterctl: Add k0smotron provider (#9991)
+- clusterctl: Add public function to create new CRD migrator (#10075)
+- Control-plane: KCP: Allow mutation of all fields that should be mutable (#9871)
+- MachineDeployment: Add MachineSetReady condition to MachineDeployment (#9262)
+- Release: Add utility to create git issues on provider repo (#9110)
+- Release: Add dependencies section to release notes tool (#10043)
+- Testing: Resolve release markers (#9265)
+- Testing: V1.29: Prepare quickstart, capd and tests for the new release including kind bump (#9872)
+
+## :bug: Bug Fixes
+- CAPD: Fix ignition to also set the kube-proxy configuration to skip setting sysctls (#9894)
+- ClusterCacheTracker: Fix ClusterCacheTracker memory leak (#9543)
+- ClusterCacheTracker: Use RequeueAfter instead of immediate requeue on ErrClusterLocked to not have exponentially increasing requeue time (#9810)
+- clusterctl: Move handlePlugins function call out of init to allow debugging tests (#10200)
+- clusterctl: Validate no objects exist from CRDs before deleting them (#9808)
+- ClusterResourceSet: Requeue after 1 minute if ErrClusterLocked got hit (#9777)
+- Control-plane: KCP should defer remediation when a control plane machine is still provisioning (#9734)
+- Devtools: Tilt: Show default cluster-template (#9820)
+- e2e: Ci: fix unset GINKGO_FOCUS variable (#10085)
+- e2e: Make MachinePools and MachineDeployments optional in ApplyClusterTemplateAndWait (#9960)
+- e2e: Re-introduce exclude capi-webhook-system to fix test flake (#10157)
+- e2e: Test/e2e: Calculate correct worker count in clusterctl upgrade test (#9892)
+- e2e: Test: Fix finalizers test to not only rely on namespaced name (#9891)
+- e2e: Test: retry GetOwnerGraph in owner references test on certificate errors (#10201)
+- IPAM: Fix webhooks using mixed api versions (#9861)
+- KCP: Skip checking `clusterConfiguration.dns` fields when KCP checking MachineNeedRollout (#9857)
+- Logging: Improve log k/v pairs and improve/drop a few log lines (#9813)
+- Machine: Bubble up machine drain condition in `MachinesReadyCondition` (#9355)
+- Machine: Watch external objects for machine before deleting (#10041)
+- MachinePool: Fix TestReconcileMachinePoolScaleToFromZero flakes (#9745)
+- API/e2e: Restore v1alpha3/v1alpha4 conversion to fix SSA issue & add e2e test coverage (#10147)
+- CABPK: Only refresh bootstrap token if needed, requeue in all cases where node hasn't joined yet (#9229)
+- Release: Fix Community meeting area formatting in release 
notes (#9784)
+- Release: Fix defaulting logic in release notes tool (#9958)
+- Release: Fix documentation area comparison in release notes (#9769)
+- Release: Fix kubeadm bootstrap prefix in release notes (#9814)
+- Release: Fix wrong branch name display for weekly update script (#9918)
+- Runtime SDK: Use keys/values structured log interface (#9998)
+- Testing: Revert "Watch for Cluster resources in topology MD controller" (#9985)
+- Testing: Reverting ginkgo.show-node-events to ginkgo.progress (#10214)
+- Testing: SSA: fix flaky test TestPatch/Test patch with Machine (#9914)
+- Testing: Test: wait for topology to get rolled out before continuing with scaling checks (#9819)
+
+## :seedling: Others
+- API: Stop relying on GVK being set on regular typed objects (#9956)
+- Bootstrap: Add MachinePool test for switching bootstrap config to another ready/non-ready object with different name (#9616)
+- CABPK: Add pod metadata to capbk manager (#10208)
+- CAPD: Feat: make docker machine bootstrap timeout configurable (#9952)
+- CAPD: Remove requeues in DockerMachinePool (#9725)
+- CAPD: Test/e2e: Support configuring ExtraPortMappings for the kind cluster (#10046)
+- CAPD: Remove duplicate fix for btrfs/zfs support (#8376)
+- CAPIM: Allow using different resource group and listener name with the in memory server (#10096)
+- CAPIM: Make in memory API server more tolerant when starting (#10211)
+- CAPIM: Make in memory runtime and server accessible from outside (#9986)
+- CI: Add fail fast to DumpResourcesForCluster in case of no route to host (#10204)
+- CI: Bump conversion-gen to v0.29.0 (#10012)
+- CI: Bump go-apidiff to v0.8.2 (#10011)
+- CI: Bump kpromo to v4.0.5 (#10140)
+- CI: Bump kubebuilder envtest to 1.29.0 (#10013)
+- CI: DumpResourcesForCluster should fail fast for i/o errors (#10238)
+- CI: Ensure build images contains correct binary and for correct architecture (#9932)
+- CI: Fix Make target generate-go-openapi (#10161)
+- CI: Hack/prowjob-gen skip creating empty files (#10022)
+- CI: Hack: implement generator for prowjobs (#9937)
+- CI: Makefile: bump dependencies (#10236)
+- CI: Test/e2e: bump WorkloadKubernetesVersion for v1.6 clusterctl upgrade test (#10017)
+- CI: Test: adjust capd dockerfile so the binary exposes the package in the built binaries path variable (#10030)
+- CI: Update GH actions to work with new release-1.6 branch (#9708)
+- ClusterClass: Add unit tests for MachinePools for webhooks and cc controller (#10055)
+- ClusterClass: Implement topology validations for topology kubernetes version upgrades (#10063)
+- ClusterClass: use the alias for ClusterCacheTrackerReader instead of the internal reference (#10319)
+- clusterctl: Add 0 default to worker-machine-count help (#10203)
+- clusterctl: Add completion for fish shell (#9950)
+- clusterctl: Add hivelocity infra provider to clusterctl (#10168)
+- clusterctl: Add in-cluster ipam provider (#8811)
+- clusterctl: Add Proxmox provider quickstart (#9798)
+- clusterctl: Better verbose logging on override path (#10180)
+- clusterctl: Bump cert-manager to 1.14.2 (#10126)
+- clusterctl: Bump cert-manager to v1.14.1 (#10113)
+- clusterctl: Clarify rules for adding new clusterctl default providers (#9975)
+- clusterctl: Deprecate clusterctl alpha topology plan (#10139)
+- clusterctl: Replace context.TODO() from clusterctl proxy.go (#9776)
+- Community meeting: Move CecileRobertMichon to emeritus (#10042)
+- Community meeting: Move ykakarap to emeritus approvers (#10000)
+- Community meeting: Promote chrischdi to Cluster API maintainer (#9997)
+- Dependency: Bump `github.com/docker/docker` from 24.0.7 -> 25.0.0 (#10057)
+- 
Dependency: Bump `golangci-lint` to v1.56.1 (#10124)
+- Dependency: Bump controller-runtime to v0.17.1 (#10131)
+- Dependency: Bump controller-runtime to v0.17.2 (#10162)
+- Dependency: Bump controller-tools to v0.14 (#9987)
+- Dependency: Bump crypto dep version (#9938)
+- Dependency: Bump Go to 1.21.5 (#9900)
+- Dependency: Bump go version to 1.21.8 (#10235)
+- Dependency: Bump kind version to v0.22.0 (#10094)
+- Dependency: Bump protobuf to v1.33.0 to address CVEs (#10248)
+- Dependency: Bump the kubernetes group to v0.28.5 (#9933)
+- Dependency: Bump to Go 1.20.12 (#9840)
+- Dependency: Go.mod: bump github.com/cloudflare/circl to v1.3.7 (#10001)
+- Devtools: Add missing folder to clean-tilt make target (#9934)
+- Devtools: Allow tilt provider with pre-build images (#10244)
+- Devtools: Explicitly set golangci config for sub modules (#9821)
+- Devtools: Fix variables names in Tiltfile (#9811)
+- Devtools: Simplify testing nightly builds with clusterctl (#10018)
+- Devtools: Small improvements to tilt (#9936)
+- e2e: Add conformance e2e tests (#10060)
+- e2e: Add DeepCopy method for E2EConfig (#9988)
+- e2e: Add PostCreateNamespace hook to E2E tests (#10067)
+- e2e: Add test util to resolve Kubernetes versions (#9995)
+- e2e: Allow to specify cluster name for E2E quick-start (#10088)
+- e2e: Bump cluster-autoscaler to v1.29.0 (#9942)
+- e2e: Drop duplicated scale test (#9968)
+- e2e: Fix ci-latest test to actually use ci latest (#10080)
+- e2e: Improve output of exec.KubectlApply (#9737)
+- e2e: Make etcd and CoreDNS optional in upgrade and self-hosted tests (#9963)
+- e2e: Test/framework add WatchDaemonSetLogsByLabelSelector method (#9984)
+- e2e: Test/framework: expose CopyAndAmendClusterctlConfig function (#10086)
+- e2e: Test: combine Finalizers tests to default quick start tests (#10039)
+- e2e: Use --wait-providers in test framework InitWithBinary func (#10149)
+- e2e: Use manager in test extension (#10097)
+- e2e: Add scale testing for upgrades (#9077)
+- Machine: Be more explicit when skipping node deletion (#10137)
+- Machine: Mark Machine healthy condition as unknown if we can't list wl nodes (#9864)
+- MachineDeployment: Set revision on machinedeployment and patch via defer (#10160)
+- MachinePool: Use SSA Patch to create machines in MP controller (#9791)
+- MachineSet: improve replica defaulting for autoscaler (#9649)
+- KCP/MachineSet: remove hardcoded WithStepCounterIf(false) (#9772)
+- Release: Add integration test for release notes tool (#9617)
+- Release: Bump kubebuilder-release-tools to v0.4.3 (#9818)
+- Release: Changelog entry to include cert-manager to v1.14.2 (#10188)
+- Release: Fix dockerfile for clusterctl build (#10058)
+- Release: Fix grammar in release script (#9981)
+- Release: Improve release-staging build (#10095)
+- Release: Improve weekly PR update generation script and documentation (#10092)
+- Release: Make release notes tool not dependent on local git (#9618)
+- Release: Prep main branch for v1.7 development (#9799)
+- Release: Provide faster support for new k8s minor releases (#9971)
+- Release: Read in dockerfiles from stdin (#9990)
+- Release: Swap in new 1.7 RT members in OWNERS (#9855)
+- Runtime SDK: Add more helper functions in topologymutation variable.go to help unmarshal variables (#9670)
+- Testing: Drop policy v1beta1 API support in the testing framework (#10158)
+- Testing: Drop unused runtime features for the in-memory provider (#9778)
+- Testing: Fix typo in test framework (#9873)
+- Testing: Fixing kubetest warnings for 
deprecations (#10172) +- Testing: Print conformance image used in kubetest (#10076) +- Testing: Remove k8s.io/utils/pointer from repository (#9836) +- Testing: Small improvements to the inmemory api server (#9935) +- Testing: Test: add dynamic finalizer assertions for e2e framework (#9970) +- Testing: Watch for Cluster resources in topology MachineSet & MachineDeployment controllers (#10029) +- Testing: Watch for Cluster resources in topology MD controller (#9865) +- util: Improve patch helper error handling (#9946) +- util: Use min/max funcs from Go SDK (#9945) + +:book: Additionally, there have been 43 contributions to our documentation and book. (#10005, #10031, #10040, #10061, #10066, #10068, #10084, #10099, #10100, #10115, #10122, #10170, #10174, #10194, #10239, #9585, #9640, #9767, #9771, #9779, #9782, #9786, #9794, #9797, #9801, #9817, #9829, #9831, #9838, #9856, #9866, #9867, #9868, #9876, #9896, #9897, #9908, #9941, #9949, #9957, #9961, #9972, #9993) + +## Dependencies + +### Added +- github.com/matttproud/golang_protobuf_extensions/v2: [v2.0.0](https://github.com/matttproud/golang_protobuf_extensions/tree/v2.0.0) + +### Changed +- cloud.google.com/go/accessapproval: v1.7.1 → v1.7.4 +- cloud.google.com/go/accesscontextmanager: v1.8.1 → v1.8.4 +- cloud.google.com/go/aiplatform: v1.48.0 → v1.52.0 +- cloud.google.com/go/analytics: v0.21.3 → v0.21.6 +- cloud.google.com/go/apigateway: v1.6.1 → v1.6.4 +- cloud.google.com/go/apigeeconnect: v1.6.1 → v1.6.4 +- cloud.google.com/go/apigeeregistry: v0.7.1 → v0.8.2 +- cloud.google.com/go/appengine: v1.8.1 → v1.8.4 +- cloud.google.com/go/area120: v0.8.1 → v0.8.4 +- cloud.google.com/go/artifactregistry: v1.14.1 → v1.14.6 +- cloud.google.com/go/asset: v1.14.1 → v1.15.3 +- cloud.google.com/go/assuredworkloads: v1.11.1 → v1.11.4 +- cloud.google.com/go/automl: v1.13.1 → v1.13.4 +- cloud.google.com/go/baremetalsolution: v1.1.1 → v1.2.3 +- cloud.google.com/go/batch: v1.3.1 → v1.6.3 +- cloud.google.com/go/beyondcorp: v1.0.0 → v1.0.3 +- cloud.google.com/go/bigquery: v1.53.0 → v1.57.1 +- cloud.google.com/go/billing: v1.16.0 → v1.17.4 +- cloud.google.com/go/binaryauthorization: v1.6.1 → v1.7.3 +- cloud.google.com/go/certificatemanager: v1.7.1 → v1.7.4 +- cloud.google.com/go/channel: v1.16.0 → v1.17.3 +- cloud.google.com/go/cloudbuild: v1.13.0 → v1.14.3 +- cloud.google.com/go/clouddms: v1.6.1 → v1.7.3 +- cloud.google.com/go/cloudtasks: v1.12.1 → v1.12.4 +- cloud.google.com/go/compute: v1.23.0 → v1.23.3 +- cloud.google.com/go/contactcenterinsights: v1.10.0 → v1.11.3 +- cloud.google.com/go/container: v1.24.0 → v1.27.1 +- cloud.google.com/go/containeranalysis: v0.10.1 → v0.11.3 +- cloud.google.com/go/datacatalog: v1.16.0 → v1.18.3 +- cloud.google.com/go/dataflow: v0.9.1 → v0.9.4 +- cloud.google.com/go/dataform: v0.8.1 → v0.9.1 +- cloud.google.com/go/datafusion: v1.7.1 → v1.7.4 +- cloud.google.com/go/datalabeling: v0.8.1 → v0.8.4 +- cloud.google.com/go/dataplex: v1.9.0 → v1.11.1 +- cloud.google.com/go/dataproc/v2: v2.0.1 → v2.2.3 +- cloud.google.com/go/dataqna: v0.8.1 → v0.8.4 +- cloud.google.com/go/datastore: v1.13.0 → v1.15.0 +- cloud.google.com/go/datastream: v1.10.0 → v1.10.3 +- cloud.google.com/go/deploy: v1.13.0 → v1.14.2 +- cloud.google.com/go/dialogflow: v1.40.0 → v1.44.3 +- cloud.google.com/go/dlp: v1.10.1 → v1.11.1 +- cloud.google.com/go/documentai: v1.22.0 → v1.23.5 +- cloud.google.com/go/domains: v0.9.1 → v0.9.4 +- cloud.google.com/go/edgecontainer: v1.1.1 → v1.1.4 +- cloud.google.com/go/essentialcontacts: v1.6.2 → v1.6.5 +- 
cloud.google.com/go/eventarc: v1.13.0 → v1.13.3 +- cloud.google.com/go/filestore: v1.7.1 → v1.7.4 +- cloud.google.com/go/firestore: v1.13.0 → v1.14.0 +- cloud.google.com/go/functions: v1.15.1 → v1.15.4 +- cloud.google.com/go/gkebackup: v1.3.0 → v1.3.4 +- cloud.google.com/go/gkeconnect: v0.8.1 → v0.8.4 +- cloud.google.com/go/gkehub: v0.14.1 → v0.14.4 +- cloud.google.com/go/gkemulticloud: v1.0.0 → v1.0.3 +- cloud.google.com/go/gsuiteaddons: v1.6.1 → v1.6.4 +- cloud.google.com/go/iam: v1.1.1 → v1.1.5 +- cloud.google.com/go/iap: v1.8.1 → v1.9.3 +- cloud.google.com/go/ids: v1.4.1 → v1.4.4 +- cloud.google.com/go/iot: v1.7.1 → v1.7.4 +- cloud.google.com/go/kms: v1.15.0 → v1.15.5 +- cloud.google.com/go/language: v1.10.1 → v1.12.2 +- cloud.google.com/go/lifesciences: v0.9.1 → v0.9.4 +- cloud.google.com/go/logging: v1.7.0 → v1.8.1 +- cloud.google.com/go/longrunning: v0.5.1 → v0.5.4 +- cloud.google.com/go/managedidentities: v1.6.1 → v1.6.4 +- cloud.google.com/go/maps: v1.4.0 → v1.6.1 +- cloud.google.com/go/mediatranslation: v0.8.1 → v0.8.4 +- cloud.google.com/go/memcache: v1.10.1 → v1.10.4 +- cloud.google.com/go/metastore: v1.12.0 → v1.13.3 +- cloud.google.com/go/monitoring: v1.15.1 → v1.16.3 +- cloud.google.com/go/networkconnectivity: v1.12.1 → v1.14.3 +- cloud.google.com/go/networkmanagement: v1.8.0 → v1.9.3 +- cloud.google.com/go/networksecurity: v0.9.1 → v0.9.4 +- cloud.google.com/go/notebooks: v1.9.1 → v1.11.2 +- cloud.google.com/go/optimization: v1.4.1 → v1.6.2 +- cloud.google.com/go/orchestration: v1.8.1 → v1.8.4 +- cloud.google.com/go/orgpolicy: v1.11.1 → v1.11.4 +- cloud.google.com/go/osconfig: v1.12.1 → v1.12.4 +- cloud.google.com/go/oslogin: v1.10.1 → v1.12.2 +- cloud.google.com/go/phishingprotection: v0.8.1 → v0.8.4 +- cloud.google.com/go/policytroubleshooter: v1.8.0 → v1.10.2 +- cloud.google.com/go/privatecatalog: v0.9.1 → v0.9.4 +- cloud.google.com/go/recaptchaenterprise/v2: v2.7.2 → v2.8.3 +- cloud.google.com/go/recommendationengine: v0.8.1 → v0.8.4 +- cloud.google.com/go/recommender: v1.10.1 → v1.11.3 +- cloud.google.com/go/redis: v1.13.1 → v1.14.1 +- cloud.google.com/go/resourcemanager: v1.9.1 → v1.9.4 +- cloud.google.com/go/resourcesettings: v1.6.1 → v1.6.4 +- cloud.google.com/go/retail: v1.14.1 → v1.14.4 +- cloud.google.com/go/run: v1.2.0 → v1.3.3 +- cloud.google.com/go/scheduler: v1.10.1 → v1.10.4 +- cloud.google.com/go/secretmanager: v1.11.1 → v1.11.4 +- cloud.google.com/go/security: v1.15.1 → v1.15.4 +- cloud.google.com/go/securitycenter: v1.23.0 → v1.24.2 +- cloud.google.com/go/servicedirectory: v1.11.0 → v1.11.3 +- cloud.google.com/go/shell: v1.7.1 → v1.7.4 +- cloud.google.com/go/spanner: v1.47.0 → v1.51.0 +- cloud.google.com/go/speech: v1.19.0 → v1.20.1 +- cloud.google.com/go/storage: v1.14.0 → v1.35.1 +- cloud.google.com/go/storagetransfer: v1.10.0 → v1.10.3 +- cloud.google.com/go/talent: v1.6.2 → v1.6.5 +- cloud.google.com/go/texttospeech: v1.7.1 → v1.7.4 +- cloud.google.com/go/tpu: v1.6.1 → v1.6.4 +- cloud.google.com/go/trace: v1.10.1 → v1.10.4 +- cloud.google.com/go/translate: v1.8.2 → v1.9.3 +- cloud.google.com/go/video: v1.19.0 → v1.20.3 +- cloud.google.com/go/videointelligence: v1.11.1 → v1.11.4 +- cloud.google.com/go/vision/v2: v2.7.2 → v2.7.5 +- cloud.google.com/go/vmmigration: v1.7.1 → v1.7.4 +- cloud.google.com/go/vmwareengine: v1.0.0 → v1.0.3 +- cloud.google.com/go/vpcaccess: v1.7.1 → v1.7.4 +- cloud.google.com/go/webrisk: v1.9.1 → v1.9.4 +- cloud.google.com/go/websecurityscanner: v1.6.1 → v1.6.4 +- cloud.google.com/go/workflows: v1.11.1 → v1.12.3 +- 
cloud.google.com/go: v0.110.7 → v0.110.10 +- github.com/cloudflare/circl: [v1.3.3 → v1.3.7](https://github.com/cloudflare/circl/compare/v1.3.3...v1.3.7) +- github.com/evanphx/json-patch/v5: [v5.7.0 → v5.9.0](https://github.com/evanphx/json-patch/compare/v5.7.0...v5.9.0) +- github.com/evanphx/json-patch: [v5.6.0+incompatible → v5.7.0+incompatible](https://github.com/evanphx/json-patch/compare/v5.6.0...v5.7.0) +- github.com/frankban/quicktest: [v1.14.4 → v1.14.6](https://github.com/frankban/quicktest/compare/v1.14.4...v1.14.6) +- github.com/fsnotify/fsnotify: [v1.6.0 → v1.7.0](https://github.com/fsnotify/fsnotify/compare/v1.6.0...v1.7.0) +- github.com/go-logr/logr: [v1.3.0 → v1.4.1](https://github.com/go-logr/logr/compare/v1.3.0...v1.4.1) +- github.com/go-logr/zapr: [v1.2.4 → v1.3.0](https://github.com/go-logr/zapr/compare/v1.2.4...v1.3.0) +- github.com/golang/mock: [v1.4.4 → v1.4.0](https://github.com/golang/mock/compare/v1.4.4...v1.4.0) +- github.com/google/cel-go: [v0.16.1 → v0.17.7](https://github.com/google/cel-go/compare/v0.16.1...v0.17.7) +- github.com/google/uuid: [v1.3.1 → v1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) +- github.com/googleapis/enterprise-certificate-proxy: [v0.3.1 → v0.3.2](https://github.com/googleapis/enterprise-certificate-proxy/compare/v0.3.1...v0.3.2) +- github.com/googleapis/google-cloud-go-testing: [bcd43fb → 1c9a4c6](https://github.com/googleapis/google-cloud-go-testing/compare/bcd43fb...1c9a4c6) +- github.com/gorilla/websocket: [v1.4.2 → v1.5.0](https://github.com/gorilla/websocket/compare/v1.4.2...v1.5.0) +- github.com/nats-io/nats.go: [v1.30.2 → v1.31.0](https://github.com/nats-io/nats.go/compare/v1.30.2...v1.31.0) +- github.com/nats-io/nkeys: [v0.4.5 → v0.4.6](https://github.com/nats-io/nkeys/compare/v0.4.5...v0.4.6) +- github.com/onsi/ginkgo/v2: [v2.13.1 → v2.16.0](https://github.com/onsi/ginkgo/compare/v2.13.1...v2.16.0) +- github.com/onsi/gomega: [v1.30.0 → v1.31.1](https://github.com/onsi/gomega/compare/v1.30.0...v1.31.1) +- github.com/pkg/sftp: [v1.13.1 → v1.13.6](https://github.com/pkg/sftp/compare/v1.13.1...v1.13.6) +- github.com/prometheus/client_golang: [v1.17.0 → v1.18.0](https://github.com/prometheus/client_golang/compare/v1.17.0...v1.18.0) +- github.com/prometheus/client_model: [9a2bf30 → v0.5.0](https://github.com/prometheus/client_model/compare/9a2bf30...v0.5.0) +- github.com/prometheus/common: [v0.44.0 → v0.45.0](https://github.com/prometheus/common/compare/v0.44.0...v0.45.0) +- github.com/prometheus/procfs: [v0.11.1 → v0.12.0](https://github.com/prometheus/procfs/compare/v0.11.1...v0.12.0) +- github.com/sagikazarmark/crypt: [v0.15.0 → v0.17.0](https://github.com/sagikazarmark/crypt/compare/v0.15.0...v0.17.0) +- github.com/sagikazarmark/locafero: [v0.3.0 → v0.4.0](https://github.com/sagikazarmark/locafero/compare/v0.3.0...v0.4.0) +- github.com/spf13/afero: [v1.10.0 → v1.11.0](https://github.com/spf13/afero/compare/v1.10.0...v1.11.0) +- github.com/spf13/cast: [v1.5.1 → v1.6.0](https://github.com/spf13/cast/compare/v1.5.1...v1.6.0) +- github.com/spf13/viper: [v1.17.0 → v1.18.2](https://github.com/spf13/viper/compare/v1.17.0...v1.18.2) +- go.etcd.io/bbolt: v1.3.7 → v1.3.8 +- go.etcd.io/etcd/api/v3: v3.5.10 → v3.5.12 +- go.etcd.io/etcd/client/pkg/v3: v3.5.10 → v3.5.12 +- go.etcd.io/etcd/client/v2: v2.305.9 → v2.305.10 +- go.etcd.io/etcd/client/v3: v3.5.10 → v3.5.12 +- go.etcd.io/etcd/pkg/v3: v3.5.9 → v3.5.10 +- go.etcd.io/etcd/raft/v3: v3.5.9 → v3.5.10 +- go.etcd.io/etcd/server/v3: v3.5.9 → v3.5.10 +- go.uber.org/zap: v1.25.0 
→ v1.26.0 +- golang.org/x/crypto: v0.15.0 → v0.19.0 +- golang.org/x/lint: 83fdc39 → 910be7a +- golang.org/x/mod: v0.13.0 → v0.14.0 +- golang.org/x/net: v0.18.0 → v0.21.0 +- golang.org/x/oauth2: v0.14.0 → v0.17.0 +- golang.org/x/sync: v0.4.0 → v0.6.0 +- golang.org/x/sys: v0.14.0 → v0.17.0 +- golang.org/x/term: v0.14.0 → v0.17.0 +- golang.org/x/time: v0.3.0 → v0.5.0 +- golang.org/x/tools: v0.14.0 → v0.17.0 +- google.golang.org/api: v0.143.0 → v0.153.0 +- google.golang.org/genproto/googleapis/api: 007df8e → bbf56f3 +- google.golang.org/genproto/googleapis/rpc: e6e6cda → 83a465c +- google.golang.org/genproto: 007df8e → bbf56f3 +- google.golang.org/protobuf: v1.31.0 → v1.33.0 +- honnef.co/go/tools: v0.0.1-2020.1.4 → v0.0.1-2019.2.3 +- k8s.io/api: v0.28.4 → v0.29.2 +- k8s.io/apiextensions-apiserver: v0.28.4 → v0.29.2 +- k8s.io/apimachinery: v0.28.4 → v0.29.2 +- k8s.io/apiserver: v0.28.4 → v0.29.2 +- k8s.io/cli-runtime: v0.28.4 → v0.29.2 +- k8s.io/client-go: v0.28.4 → v0.29.2 +- k8s.io/cluster-bootstrap: v0.28.4 → v0.29.2 +- k8s.io/code-generator: v0.28.4 → v0.29.2 +- k8s.io/component-base: v0.28.4 → v0.29.2 +- k8s.io/component-helpers: v0.28.4 → v0.29.2 +- k8s.io/gengo: c0856e2 → 9cce18d +- k8s.io/klog/v2: v2.100.1 → v2.110.1 +- k8s.io/kms: v0.28.4 → v0.29.2 +- k8s.io/kube-openapi: 2695361 → 2dd684a +- k8s.io/kubectl: v0.28.4 → v0.29.2 +- k8s.io/metrics: v0.28.4 → v0.29.2 +- k8s.io/utils: d93618c → b307cd5 +- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.1.2 → v0.28.0 +- sigs.k8s.io/controller-runtime: v0.16.3 → v0.17.2 +- sigs.k8s.io/structured-merge-diff/v4: v4.2.3 → v4.4.1 + +### Removed +- github.com/benbjohnson/clock: [v1.3.0](https://github.com/benbjohnson/clock/tree/v1.3.0) +- github.com/docker/distribution: [v2.8.3+incompatible](https://github.com/docker/distribution/tree/v2.8.3) +- github.com/google/martian/v3: [v3.1.0](https://github.com/google/martian/tree/v3.1.0) +- github.com/minio/highwayhash: [v1.0.2](https://github.com/minio/highwayhash/tree/v1.0.2) +- github.com/nats-io/jwt/v2: [v2.4.1](https://github.com/nats-io/jwt/tree/v2.4.1) +- go.opentelemetry.io/otel/exporters/otlp/internal/retry: v1.10.0 + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.7.0-beta.1.md b/CHANGELOG/v1.7.0-beta.1.md new file mode 100644 index 000000000000..8372bbefb1ad --- /dev/null +++ b/CHANGELOG/v1.7.0-beta.1.md @@ -0,0 +1,416 @@ +🚨 This is a BETA RELEASE. Use it only for testing purposes. If you find any bugs, file an [issue](https://github.com/kubernetes-sigs/cluster-api/issues/new). + +## Highlights + +* Enable kubeadm feature gates mutation +* Add public function to create new CRD migrator +* Add MachineSetReady condition to MachineDeployment + +
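+By way of illustration for the new `MachineSetReady` condition highlighted above (a hypothetical excerpt, not output captured from a real cluster), a MachineDeployment is expected to surface the aggregated state of its MachineSets in `status.conditions`, roughly:
+
+```yaml
+# Hypothetical excerpt of `kubectl get machinedeployment <name> -o yaml`.
+status:
+  conditions:
+  - type: Ready
+    status: "True"
+    lastTransitionTime: "2024-01-01T00:00:00Z"
+  # New: reflects the readiness of the MachineSets owned by this MachineDeployment.
+  - type: MachineSetReady
+    status: "True"
+    lastTransitionTime: "2024-01-01T00:00:00Z"
+```
+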
+More details about the release + +:warning: **BETA RELEASE NOTES** :warning: + +## 👌 Kubernetes version support + +- Management Cluster: v1.26.x -> v1.29.x +- Workload Cluster: v1.24.x -> v1.29.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.6.0 +## :chart_with_upwards_trend: Overview +- 306 new commits merged +- 4 breaking changes :warning: +- 11 feature additions ✨ +- 37 bugs fixed 🐛 + +## :memo: Proposals +- MachinePool: Update MachinePool Machines proposal with diagrams (#9664) + +## :warning: Breaking Changes +- Dependency: Bump to controller-runtime v0.17.0 & k8s.io v0.29 (#9964) +- Logging: Make event recorder names consistent for KCP and topology controller (#9862) +- KCP/MachineSet: Objects generated by KCP, MachineSets and MachinePools will now consistently use machine name (#9833) + +## :sparkles: New Features +- API: Enable kubeadm feature gates mutation (#10154) +- clusterctl: Add k0smotron provider (#9991) +- clusterctl: Add public function to create new CRD migrator (#10075) +- Control-plane: Allow mutation of all fields that should be mutable (#9871) +- MachineDeployment: Add MachineSetReady condition to MachineDeployment (#9262) +- MachineDeployment: Taint nodes with PreferNoSchedule during rollouts (#10223) +- MachineHealthCheck: implement annotation to manually mark machines for remediation (#10202) +- Release: Add utility to create git issues on provider repo (#9110) +- Release: Add dependencies section to release notes tool (#10043) +- Testing: Resolve release markers (#9265) +- Testing: Prepare quickstart, capd and tests for the new release including kind bump (#9872) + +## :bug: Bug Fixes +- API: Use ptr instead of own implementation (#10276) +- API/e2e: Restore v1alpha3/v1alpha4 conversion to fix SSA issue & add e2e test coverage (#10147) +- CAPD: Fix ignition to also set the kube-proxy configuration to skip setting sysctls (#9894) +- CAPD: Remove --enable-hostpath-provisioner flag (#10271) +- CAPD: Remove duplicate fix for btrfs/zfs support (#8376) +- CABPK: Only refresh bootstrap token if needed, requeue in all cases where node hasn't joined yet (#9229) +- ClusterCacheTracker: Fix ClusterCacheTracker memory leak (#9543) +- ClusterCacheTracker: Use RequeueAfter instead of immediate requeue on ErrClusterLocked to not have exponentially increasing requeue time (#9810) +- clusterctl: Move handlePlugins function call out of init to allow debugging tests (#10200) +- clusterctl: Validate no objects exist from CRDs before deleting them (#9808) +- ClusterResourceSet: Requeue after 1 minute if ErrClusterLocked got hit (#9777) +- Control-plane: KCP should defer remediation when a control plane machine is still provisioning (#9734) +- Devtools: Tilt: Show default cluster-template (#9820) +- e2e: Add wait for MachineList to be available (#10301) +- e2e: Ci: fix unset GINKGO_FOCUS variable (#10085) +- e2e: Make MachinePools and MachineDeployments optional in ApplyClusterTemplateAndWait (#9960) +- e2e: Re-introduce exclude capi-webhook-system to fix test flake (#10157) +- e2e: Calculate correct worker count in clusterctl upgrade test (#9892) +- e2e: Fix finalizers test to not only rely on namespaced name (#9891) +- e2e: retry GetOwnerGraph in owner references test on certificate errors (#10201) +- IPAM: Fix webhooks using mixed api versions (#9861) +- KCP: Skip checking `clusterConfiguration.dns` fields when KCP checking MachineNeedRollout (#9857) +- Logging: Improve log k/v pairs 
and improve/drop a few log lines (#9813)
+- Machine: Bubble up machine drain condition in `MachinesReadyCondition` (#9355)
+- Machine: Watch external objects for machine before deleting (#10041)
+- MachinePool: Fix TestReconcileMachinePoolScaleToFromZero flakes (#9745)
+- Release: Fix Community meeting area formatting in release notes (#9784)
+- Release: Fix defaulting logic in release notes tool (#9958)
+- Release: Fix documentation area comparison in release notes (#9769)
+- Release: Fix kubeadm bootstrap prefix in release notes (#9814)
+- Release: Fix wrong branch name display for weekly update script (#9918)
+- Runtime SDK: Use keys/values structured log interface (#9998)
+- Testing: Revert "Watch for Cluster resources in topology MD controller" (#9985)
+- Testing: Reverting ginkgo.show-node-events to ginkgo.progress (#10214)
+- Testing: fix flaky test TestPatch/Test patch with Machine (#9914)
+- Testing: wait for topology to get rolled out before continuing with scaling checks (#9819)
+- util: Add tests and update provider_issues.go (#10264)
+
+## :seedling: Others
+- API: Stop relying on GVK being set on regular typed objects (#9956)
+- Bootstrap: Add MachinePool test for switching bootstrap config to another ready/non-ready object with different name (#9616)
+- CABPK: Add pod metadata to capbk manager (#10208)
+- CAPD: Feat: make docker machine bootstrap timeout configurable (#9952)
+- CAPD: Remove requeues in DockerMachinePool (#9725)
+- CAPD: Support configuring ExtraPortMappings for the kind cluster (#10046)
+- CAPIM: Add namespaces to the list of resource handled by the in-memory API server (#10297)
+- CAPIM: Allow using different resource group and listener name with the in memory server (#10096)
+- CAPIM: Make in memory API server more tolerant when starting (#10211)
+- CAPIM: Make in memory runtime and server accessible from outside (#9986)
+- CAPIM: Test/inmemory: use port only to identify the wcl to make port-forward… (#10245)
+- CI: Add fail fast to DumpResourcesForCluster in case of no route to host (#10204)
+- CI: Added go directive test (#10261)
+- CI: Bump conversion-gen to v0.29.0 (#10012)
+- CI: Bump go-apidiff to v0.8.2 (#10011)
+- CI: Bump govulncheck to v1.0.4 (#10274)
+- CI: Bump kpromo to v4.0.5 (#10140)
+- CI: Bump kubebuilder envtest to 1.29.0 (#10013)
+- CI: DumpResourcesForCluster should fail fast for i/o errors (#10238)
+- CI: Ensure build images contains correct binary and for correct architecture (#9932)
+- CI: Fix Make target generate-go-openapi (#10161)
+- CI: Fix TestPatchNode flake (#10287)
+- CI: Hack/prowjob-gen skip creating empty files (#10022)
+- CI: Hack: implement generator for prowjobs (#9937)
+- CI: Makefile: bump dependencies (#10236)
+- CI: bump WorkloadKubernetesVersion for v1.6 clusterctl upgrade test (#10017)
+- CI: adjust capd dockerfile so the binary exposes the package in the built binaries path variable (#10030)
+- CI: Update GH actions to work with new release-1.6 branch (#9708)
+- ClusterClass: Add ClusterClass variables metadata (#10308)
+- ClusterClass: Add unit tests for MachinePools for webhooks and cc controller (#10055)
+- ClusterClass: Add unit tests for MachinePools in topology/scope package (#10052)
+- ClusterClass: Implement topology validations for topology kubernetes version upgrades (#10063)
+- ClusterClass: Webhooks: use the alias for ClusterCacheTrackerReader instead of the internal reference (#10309)
+- clusterctl: Add 0 default to worker-machine-count help (#10203)
+- clusterctl: Add completion for fish shell 
(#9950) +- clusterctl: Add hivelocity infra provider to clusterctl (#10168) +- clusterctl: Add in-cluster ipam provider (#8811) +- clusterctl: Add Proxmox provider quickstart (#9798) +- clusterctl: Better verbose logging on override path (#10180) +- clusterctl: Bump cert-manager to 1.14.2 (#10126) +- clusterctl: Bump cert-manager to 1.14.4 (#10267) +- clusterctl: Bump cert-manager to v1.14.1 (#10113) +- clusterctl: Clarify rules for adding new clusterctl default providers (#9975) +- clusterctl: Deprecate clusterctl alpha topology plan (#10139) +- clusterctl: Replace context.TODO() from clusterctl proxy.go (#9776) +- Community meeting: Move CecileRobertMichon to emeritus (#10042) +- Community meeting: Move ykakarap to emeritus approvers (#10000) +- Community meeting: Promote chrischdi to Cluster API maintainer (#9997) +- Dependency: Bump `github.com/docker/docker` from 24.0.7 -> 25.0.0 (#10057) +- Dependency: Bump `golangci-lint` to v1.56.1 (#10124) +- Dependency: Bump controller-runtime to v0.17.1 (#10131) +- Dependency: Bump controller-runtime to v0.17.2 (#10162) +- Dependency: Bump controller-tools to v0.14 (#9987) +- Dependency: Bump crypto dep version (#9938) +- Dependency: Bump github.com/onsi/ginkgo/v2 from 2.16.0 to 2.17.0 (#10284) +- Dependency: Bump github.com/onsi/gomega from 1.31.1 to 1.32.0 (#10285) +- Dependency: Bump Go to 1.21.5 (#9900) +- Dependency: Bump go version to 1.21.8 (#10235) +- Dependency: Bump kind version to v0.22.0 (#10094) +- Dependency: Bump protobuf to v1.33.0 to address CVEs (#10248) +- Dependency: Bump the kubernetes group to v0.28.5 (#9933) +- Dependency: Bump the kubernetes group with 8 updates (#10286) +- Dependency: Bump to Go 1.20.12 (#9840) +- Dependency: Bump github.com/docker/docker from 25.0.4+incompatible to 25.0.5+incompatible (#10302) +- Dependency: Go.mod: bump github.com/cloudflare/circl to v1.3.7 (#10001) +- Devtools: Add missing folder to clean-tilt make target (#9934) +- Devtools: Allow tilt provider with pre-build images (#10244) +- Devtools: Explicitly set golangci config for sub modules (#9821) +- Devtools: Fix variables names in Tiltfile (#9811) +- Devtools: Implement privileged namespace security policy update for tilt-prepare (#10178) +- Devtools: Simplify testing nightly builds with clusterctl (#10018) +- Devtools: Small improvements to tilt (#9936) +- e2e: Add conformance e2e tests (#10060) +- e2e: Add DeepCopy method for E2EConfig (#9988) +- e2e: Add PostCreateNamespace hook to E2E tests (#10067) +- e2e: Add test util to resolve Kubernetes versions (#9995) +- e2e: Allow to specify cluster name for E2E quick-start (#10088) +- e2e: Bump cluster-autoscaler to v1.29.0 (#9942) +- e2e: Drop duplicated scale test (#9968) +- e2e: Fix ci-latest test to actually use ci latest (#10080) +- e2e: Fix CRS e2e helper with multiple bindings (#10191) +- e2e: Improve output of exec.KubectlApply (#9737) +- e2e: Make etcd and CoreDNS optional in upgrade and self-hosted tests (#9963) +- e2e: add WatchDaemonSetLogsByLabelSelector method (#9984) +- e2e: expose CopyAndAmendClusterctlConfig function (#10086) +- e2e: combine Finalizers tests to default quick start tests (#10039) +- e2e: Use --wait-providers in test framework InitWithBinary func (#10149) +- e2e: Use manager in test extension (#10097) +- e2e: Add scale testing for upgrades (#9077) +- Machine: Be more explicit when skipping node deletion (#10137) +- Machine: Mark Machine healthy condition as unknown if we can't list wl nodes (#9864) +- MachineDeployment: Set revision on machinedeployment and 
patch via defer (#10160)
+- MachinePool: Use SSA Patch to create machines in MP controller (#9791)
+- MachineSet: improve replica defaulting for autoscaler (#9649)
+- KCP/MachineSet: remove hardcoded WithStepCounterIf(false) (#9772)
+- Release: Add integration test for release notes tool (#9617)
+- Release: Bump kubebuilder-release-tools to v0.4.3 (#9818)
+- Release: Changelog entry to include cert-manager to v1.14.2 (#10188)
+- Release: Fix dockerfile for clusterctl build (#10058)
+- Release: Fix grammar in release script (#9981)
+- Release: Improve release-staging build (#10095)
+- Release: Improve weekly PR update generation script and documentation (#10092)
+- Release: Make release notes tool not dependent on local git (#9618)
+- Release: Prep main branch for v1.7 development (#9799)
+- Release: Provide faster support for new k8s minor releases (#9971)
+- Release: Read in dockerfiles from stdin (#9990)
+- Release: Release v1.5.7 (#10251)
+- Release: Release v1.6.3 (#10252)
+- Release: Swap in new 1.7 RT members in OWNERS (#9855)
+- Runtime SDK: Add more helper functions in topologymutation variable.go to help unmarshal variables (#9670)
+- Testing: Drop policy v1beta1 API support in the testing framework (#10158)
+- Testing: Drop unused runtime features for the in-memory provider (#9778)
+- Testing: Fix typo in test framework (#9873)
+- Testing: Fixing kubetest warnings for deprecations (#10172)
+- Testing: Print conformance image used in kubetest (#10076)
+- Testing: Remove k8s.io/utils/pointer from repository (#9836)
+- Testing: Small improvements to the inmemory api server (#9935)
+- Testing: add dynamic finalizer assertions for e2e framework (#9970)
+- Testing: Watch for Cluster resources in topology MachineSet & MachineDeployment controllers (#10029)
+- Testing: Watch for Cluster resources in topology MD controller (#9865)
+- util: Improve patch helper error handling (#9946)
+- util: Use min/max funcs from Go SDK (#9945)
+
+:book: Additionally, there have been 46 contributions to our documentation and book. 
(#10005, #10031, #10040, #10061, #10066, #10068, #10084, #10099, #10100, #10115, #10122, #10170, #10174, #10194, #10239, #10257, #10268, #10288, #9585, #9640, #9767, #9771, #9779, #9782, #9786, #9794, #9797, #9801, #9817, #9829, #9831, #9838, #9856, #9866, #9867, #9868, #9876, #9896, #9897, #9908, #9941, #9949, #9957, #9961, #9972, #9993) + +## Dependencies + +### Added +- github.com/matttproud/golang_protobuf_extensions/v2: [v2.0.0](https://github.com/matttproud/golang_protobuf_extensions/tree/v2.0.0) + +### Changed +- cloud.google.com/go/accessapproval: v1.7.1 → v1.7.4 +- cloud.google.com/go/accesscontextmanager: v1.8.1 → v1.8.4 +- cloud.google.com/go/aiplatform: v1.48.0 → v1.52.0 +- cloud.google.com/go/analytics: v0.21.3 → v0.21.6 +- cloud.google.com/go/apigateway: v1.6.1 → v1.6.4 +- cloud.google.com/go/apigeeconnect: v1.6.1 → v1.6.4 +- cloud.google.com/go/apigeeregistry: v0.7.1 → v0.8.2 +- cloud.google.com/go/appengine: v1.8.1 → v1.8.4 +- cloud.google.com/go/area120: v0.8.1 → v0.8.4 +- cloud.google.com/go/artifactregistry: v1.14.1 → v1.14.6 +- cloud.google.com/go/asset: v1.14.1 → v1.15.3 +- cloud.google.com/go/assuredworkloads: v1.11.1 → v1.11.4 +- cloud.google.com/go/automl: v1.13.1 → v1.13.4 +- cloud.google.com/go/baremetalsolution: v1.1.1 → v1.2.3 +- cloud.google.com/go/batch: v1.3.1 → v1.6.3 +- cloud.google.com/go/beyondcorp: v1.0.0 → v1.0.3 +- cloud.google.com/go/bigquery: v1.53.0 → v1.57.1 +- cloud.google.com/go/billing: v1.16.0 → v1.17.4 +- cloud.google.com/go/binaryauthorization: v1.6.1 → v1.7.3 +- cloud.google.com/go/certificatemanager: v1.7.1 → v1.7.4 +- cloud.google.com/go/channel: v1.16.0 → v1.17.3 +- cloud.google.com/go/cloudbuild: v1.13.0 → v1.14.3 +- cloud.google.com/go/clouddms: v1.6.1 → v1.7.3 +- cloud.google.com/go/cloudtasks: v1.12.1 → v1.12.4 +- cloud.google.com/go/compute: v1.23.0 → v1.23.3 +- cloud.google.com/go/contactcenterinsights: v1.10.0 → v1.11.3 +- cloud.google.com/go/container: v1.24.0 → v1.27.1 +- cloud.google.com/go/containeranalysis: v0.10.1 → v0.11.3 +- cloud.google.com/go/datacatalog: v1.16.0 → v1.18.3 +- cloud.google.com/go/dataflow: v0.9.1 → v0.9.4 +- cloud.google.com/go/dataform: v0.8.1 → v0.9.1 +- cloud.google.com/go/datafusion: v1.7.1 → v1.7.4 +- cloud.google.com/go/datalabeling: v0.8.1 → v0.8.4 +- cloud.google.com/go/dataplex: v1.9.0 → v1.11.1 +- cloud.google.com/go/dataproc/v2: v2.0.1 → v2.2.3 +- cloud.google.com/go/dataqna: v0.8.1 → v0.8.4 +- cloud.google.com/go/datastore: v1.13.0 → v1.15.0 +- cloud.google.com/go/datastream: v1.10.0 → v1.10.3 +- cloud.google.com/go/deploy: v1.13.0 → v1.14.2 +- cloud.google.com/go/dialogflow: v1.40.0 → v1.44.3 +- cloud.google.com/go/dlp: v1.10.1 → v1.11.1 +- cloud.google.com/go/documentai: v1.22.0 → v1.23.5 +- cloud.google.com/go/domains: v0.9.1 → v0.9.4 +- cloud.google.com/go/edgecontainer: v1.1.1 → v1.1.4 +- cloud.google.com/go/essentialcontacts: v1.6.2 → v1.6.5 +- cloud.google.com/go/eventarc: v1.13.0 → v1.13.3 +- cloud.google.com/go/filestore: v1.7.1 → v1.7.4 +- cloud.google.com/go/firestore: v1.13.0 → v1.14.0 +- cloud.google.com/go/functions: v1.15.1 → v1.15.4 +- cloud.google.com/go/gkebackup: v1.3.0 → v1.3.4 +- cloud.google.com/go/gkeconnect: v0.8.1 → v0.8.4 +- cloud.google.com/go/gkehub: v0.14.1 → v0.14.4 +- cloud.google.com/go/gkemulticloud: v1.0.0 → v1.0.3 +- cloud.google.com/go/gsuiteaddons: v1.6.1 → v1.6.4 +- cloud.google.com/go/iam: v1.1.1 → v1.1.5 +- cloud.google.com/go/iap: v1.8.1 → v1.9.3 +- cloud.google.com/go/ids: v1.4.1 → v1.4.4 +- cloud.google.com/go/iot: v1.7.1 → v1.7.4 +- 
cloud.google.com/go/kms: v1.15.0 → v1.15.5 +- cloud.google.com/go/language: v1.10.1 → v1.12.2 +- cloud.google.com/go/lifesciences: v0.9.1 → v0.9.4 +- cloud.google.com/go/logging: v1.7.0 → v1.8.1 +- cloud.google.com/go/longrunning: v0.5.1 → v0.5.4 +- cloud.google.com/go/managedidentities: v1.6.1 → v1.6.4 +- cloud.google.com/go/maps: v1.4.0 → v1.6.1 +- cloud.google.com/go/mediatranslation: v0.8.1 → v0.8.4 +- cloud.google.com/go/memcache: v1.10.1 → v1.10.4 +- cloud.google.com/go/metastore: v1.12.0 → v1.13.3 +- cloud.google.com/go/monitoring: v1.15.1 → v1.16.3 +- cloud.google.com/go/networkconnectivity: v1.12.1 → v1.14.3 +- cloud.google.com/go/networkmanagement: v1.8.0 → v1.9.3 +- cloud.google.com/go/networksecurity: v0.9.1 → v0.9.4 +- cloud.google.com/go/notebooks: v1.9.1 → v1.11.2 +- cloud.google.com/go/optimization: v1.4.1 → v1.6.2 +- cloud.google.com/go/orchestration: v1.8.1 → v1.8.4 +- cloud.google.com/go/orgpolicy: v1.11.1 → v1.11.4 +- cloud.google.com/go/osconfig: v1.12.1 → v1.12.4 +- cloud.google.com/go/oslogin: v1.10.1 → v1.12.2 +- cloud.google.com/go/phishingprotection: v0.8.1 → v0.8.4 +- cloud.google.com/go/policytroubleshooter: v1.8.0 → v1.10.2 +- cloud.google.com/go/privatecatalog: v0.9.1 → v0.9.4 +- cloud.google.com/go/recaptchaenterprise/v2: v2.7.2 → v2.8.3 +- cloud.google.com/go/recommendationengine: v0.8.1 → v0.8.4 +- cloud.google.com/go/recommender: v1.10.1 → v1.11.3 +- cloud.google.com/go/redis: v1.13.1 → v1.14.1 +- cloud.google.com/go/resourcemanager: v1.9.1 → v1.9.4 +- cloud.google.com/go/resourcesettings: v1.6.1 → v1.6.4 +- cloud.google.com/go/retail: v1.14.1 → v1.14.4 +- cloud.google.com/go/run: v1.2.0 → v1.3.3 +- cloud.google.com/go/scheduler: v1.10.1 → v1.10.4 +- cloud.google.com/go/secretmanager: v1.11.1 → v1.11.4 +- cloud.google.com/go/security: v1.15.1 → v1.15.4 +- cloud.google.com/go/securitycenter: v1.23.0 → v1.24.2 +- cloud.google.com/go/servicedirectory: v1.11.0 → v1.11.3 +- cloud.google.com/go/shell: v1.7.1 → v1.7.4 +- cloud.google.com/go/spanner: v1.47.0 → v1.51.0 +- cloud.google.com/go/speech: v1.19.0 → v1.20.1 +- cloud.google.com/go/storage: v1.14.0 → v1.35.1 +- cloud.google.com/go/storagetransfer: v1.10.0 → v1.10.3 +- cloud.google.com/go/talent: v1.6.2 → v1.6.5 +- cloud.google.com/go/texttospeech: v1.7.1 → v1.7.4 +- cloud.google.com/go/tpu: v1.6.1 → v1.6.4 +- cloud.google.com/go/trace: v1.10.1 → v1.10.4 +- cloud.google.com/go/translate: v1.8.2 → v1.9.3 +- cloud.google.com/go/video: v1.19.0 → v1.20.3 +- cloud.google.com/go/videointelligence: v1.11.1 → v1.11.4 +- cloud.google.com/go/vision/v2: v2.7.2 → v2.7.5 +- cloud.google.com/go/vmmigration: v1.7.1 → v1.7.4 +- cloud.google.com/go/vmwareengine: v1.0.0 → v1.0.3 +- cloud.google.com/go/vpcaccess: v1.7.1 → v1.7.4 +- cloud.google.com/go/webrisk: v1.9.1 → v1.9.4 +- cloud.google.com/go/websecurityscanner: v1.6.1 → v1.6.4 +- cloud.google.com/go/workflows: v1.11.1 → v1.12.3 +- cloud.google.com/go: v0.110.7 → v0.110.10 +- github.com/cloudflare/circl: [v1.3.3 → v1.3.7](https://github.com/cloudflare/circl/compare/v1.3.3...v1.3.7) +- github.com/evanphx/json-patch/v5: [v5.7.0 → v5.9.0](https://github.com/evanphx/json-patch/compare/v5.7.0...v5.9.0) +- github.com/evanphx/json-patch: [v5.6.0+incompatible → v5.7.0+incompatible](https://github.com/evanphx/json-patch/compare/v5.6.0...v5.7.0) +- github.com/frankban/quicktest: [v1.14.4 → v1.14.6](https://github.com/frankban/quicktest/compare/v1.14.4...v1.14.6) +- github.com/fsnotify/fsnotify: [v1.6.0 → v1.7.0](https://github.com/fsnotify/fsnotify/compare/v1.6.0...v1.7.0) +- 
github.com/go-logr/logr: [v1.3.0 → v1.4.1](https://github.com/go-logr/logr/compare/v1.3.0...v1.4.1) +- github.com/go-logr/zapr: [v1.2.4 → v1.3.0](https://github.com/go-logr/zapr/compare/v1.2.4...v1.3.0) +- github.com/golang/mock: [v1.4.4 → v1.4.0](https://github.com/golang/mock/compare/v1.4.4...v1.4.0) +- github.com/golang/protobuf: [v1.5.3 → v1.5.4](https://github.com/golang/protobuf/compare/v1.5.3...v1.5.4) +- github.com/google/cel-go: [v0.16.1 → v0.17.7](https://github.com/google/cel-go/compare/v0.16.1...v0.17.7) +- github.com/google/uuid: [v1.3.1 → v1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) +- github.com/googleapis/enterprise-certificate-proxy: [v0.3.1 → v0.3.2](https://github.com/googleapis/enterprise-certificate-proxy/compare/v0.3.1...v0.3.2) +- github.com/googleapis/google-cloud-go-testing: [bcd43fb → 1c9a4c6](https://github.com/googleapis/google-cloud-go-testing/compare/bcd43fb...1c9a4c6) +- github.com/gorilla/websocket: [v1.4.2 → v1.5.0](https://github.com/gorilla/websocket/compare/v1.4.2...v1.5.0) +- github.com/nats-io/nats.go: [v1.30.2 → v1.31.0](https://github.com/nats-io/nats.go/compare/v1.30.2...v1.31.0) +- github.com/nats-io/nkeys: [v0.4.5 → v0.4.6](https://github.com/nats-io/nkeys/compare/v0.4.5...v0.4.6) +- github.com/onsi/ginkgo/v2: [v2.13.1 → v2.17.1](https://github.com/onsi/ginkgo/compare/v2.13.1...v2.17.0) +- github.com/onsi/gomega: [v1.30.0 → v1.32.0](https://github.com/onsi/gomega/compare/v1.30.0...v1.32.0) +- github.com/pkg/sftp: [v1.13.1 → v1.13.6](https://github.com/pkg/sftp/compare/v1.13.1...v1.13.6) +- github.com/prometheus/client_golang: [v1.17.0 → v1.18.0](https://github.com/prometheus/client_golang/compare/v1.17.0...v1.18.0) +- github.com/prometheus/client_model: [9a2bf30 → v0.5.0](https://github.com/prometheus/client_model/compare/9a2bf30...v0.5.0) +- github.com/prometheus/common: [v0.44.0 → v0.45.0](https://github.com/prometheus/common/compare/v0.44.0...v0.45.0) +- github.com/prometheus/procfs: [v0.11.1 → v0.12.0](https://github.com/prometheus/procfs/compare/v0.11.1...v0.12.0) +- github.com/sagikazarmark/crypt: [v0.15.0 → v0.17.0](https://github.com/sagikazarmark/crypt/compare/v0.15.0...v0.17.0) +- github.com/sagikazarmark/locafero: [v0.3.0 → v0.4.0](https://github.com/sagikazarmark/locafero/compare/v0.3.0...v0.4.0) +- github.com/spf13/afero: [v1.10.0 → v1.11.0](https://github.com/spf13/afero/compare/v1.10.0...v1.11.0) +- github.com/spf13/cast: [v1.5.1 → v1.6.0](https://github.com/spf13/cast/compare/v1.5.1...v1.6.0) +- github.com/spf13/viper: [v1.17.0 → v1.18.2](https://github.com/spf13/viper/compare/v1.17.0...v1.18.2) +- go.etcd.io/bbolt: v1.3.7 → v1.3.8 +- go.etcd.io/etcd/api/v3: v3.5.10 → v3.5.12 +- go.etcd.io/etcd/client/pkg/v3: v3.5.10 → v3.5.12 +- go.etcd.io/etcd/client/v2: v2.305.9 → v2.305.10 +- go.etcd.io/etcd/client/v3: v3.5.10 → v3.5.12 +- go.etcd.io/etcd/pkg/v3: v3.5.9 → v3.5.10 +- go.etcd.io/etcd/raft/v3: v3.5.9 → v3.5.10 +- go.etcd.io/etcd/server/v3: v3.5.9 → v3.5.10 +- go.uber.org/zap: v1.25.0 → v1.26.0 +- golang.org/x/crypto: v0.15.0 → v0.21.0 +- golang.org/x/lint: 83fdc39 → 910be7a +- golang.org/x/mod: v0.13.0 → v0.14.0 +- golang.org/x/net: v0.18.0 → v0.22.0 +- golang.org/x/oauth2: v0.14.0 → v0.18.0 +- golang.org/x/sync: v0.4.0 → v0.6.0 +- golang.org/x/sys: v0.14.0 → v0.18.0 +- golang.org/x/term: v0.14.0 → v0.18.0 +- golang.org/x/time: v0.3.0 → v0.5.0 +- golang.org/x/tools: v0.14.0 → v0.17.0 +- google.golang.org/api: v0.143.0 → v0.153.0 +- google.golang.org/genproto/googleapis/api: 007df8e → bbf56f3 +- 
google.golang.org/genproto/googleapis/rpc: e6e6cda → 83a465c
+- google.golang.org/genproto: 007df8e → bbf56f3
+- google.golang.org/protobuf: v1.31.0 → v1.33.0
+- honnef.co/go/tools: v0.0.1-2020.1.4 → v0.0.1-2019.2.3
+- k8s.io/api: v0.28.4 → v0.29.3
+- k8s.io/apiextensions-apiserver: v0.28.4 → v0.29.3
+- k8s.io/apimachinery: v0.28.4 → v0.29.3
+- k8s.io/apiserver: v0.28.4 → v0.29.3
+- k8s.io/cli-runtime: v0.28.4 → v0.29.3
+- k8s.io/client-go: v0.28.4 → v0.29.3
+- k8s.io/cluster-bootstrap: v0.28.4 → v0.29.3
+- k8s.io/code-generator: v0.28.4 → v0.29.3
+- k8s.io/component-base: v0.28.4 → v0.29.3
+- k8s.io/component-helpers: v0.28.4 → v0.29.3
+- k8s.io/gengo: c0856e2 → 9cce18d
+- k8s.io/klog/v2: v2.100.1 → v2.110.1
+- k8s.io/kms: v0.28.4 → v0.29.3
+- k8s.io/kube-openapi: 2695361 → 2dd684a
+- k8s.io/kubectl: v0.28.4 → v0.29.3
+- k8s.io/metrics: v0.28.4 → v0.29.3
+- k8s.io/utils: d93618c → b307cd5
+- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.1.2 → v0.28.0
+- sigs.k8s.io/controller-runtime: v0.16.3 → v0.17.2
+- sigs.k8s.io/structured-merge-diff/v4: v4.2.3 → v4.4.1
+
+### Removed
+- github.com/benbjohnson/clock: [v1.3.0](https://github.com/benbjohnson/clock/tree/v1.3.0)
+- github.com/docker/distribution: [v2.8.3+incompatible](https://github.com/docker/distribution/tree/v2.8.3)
+- github.com/google/martian/v3: [v3.1.0](https://github.com/google/martian/tree/v3.1.0)
+- github.com/minio/highwayhash: [v1.0.2](https://github.com/minio/highwayhash/tree/v1.0.2)
+- github.com/nats-io/jwt/v2: [v2.4.1](https://github.com/nats-io/jwt/tree/v2.4.1)
+- go.opentelemetry.io/otel/exporters/otlp/internal/retry: v1.10.0
+
+_Thanks to all our contributors!_ 😊
diff --git a/CHANGELOG/v1.7.0-rc.0.md b/CHANGELOG/v1.7.0-rc.0.md
new file mode 100644
index 000000000000..e408d5f87152
--- /dev/null
+++ b/CHANGELOG/v1.7.0-rc.0.md
@@ -0,0 +1,464 @@
+🚨 This is a RELEASE CANDIDATE. Use it only for testing purposes. If you find any bugs, file an [issue](https://github.com/kubernetes-sigs/cluster-api/issues/new).
+## Highlights
+
+* Enable kubeadm feature gates mutation
+* Add public function to create new CRD migrator
+* Add MachineSetReady condition to MachineDeployment
+
+## Changes since v1.7.0-beta.1
+## :chart_with_upwards_trend: Overview
+- 16 new commits merged
+- 1 breaking change :warning:
+- 1 feature addition ✨
+- 5 bugs fixed 🐛
+
+## :warning: Breaking Changes
+- MachinePool: Set MachinePool feature flag default to true + Beta (#10141); see the opt-out sketch below
+
+## :sparkles: New Features
+- Release: Add release notes expander functionality (#10091)
+
+## :bug: Bug Fixes
+- ClusterClass: Fix for TestServerSideApplyWithDefaulting (#10307)
+- ClusterClass: Improve handling of topology orphaned objects (#10277)
+- e2e: Fix clusterctl upgrade e2e tests (enable CRS) (#10340)
+- Runtime SDK: Fix ClusterClass variables status & RuntimeExtension and add test coverage (#10337)
+- Testing: Fix using correct testing.T when creating gomega object (#10342)
+
+## :seedling: Others
+- Release: Determine release type from tag to also handle beta releases (#10324)
+- Runtime SDK: Enable integration tests of RuntimeExtensions (#10330)
+
+## Dependencies
+
+### Added
+_Nothing has changed._
+
+### Changed
+- go.etcd.io/etcd/api/v3: v3.5.12 → v3.5.13
+- go.etcd.io/etcd/client/pkg/v3: v3.5.12 → v3.5.13
+- go.etcd.io/etcd/client/v3: v3.5.12 → v3.5.13
+
+### Removed
+_Nothing has changed._
+
+
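+With the `MachinePool` feature gate now enabled by default, the `EXP_MACHINE_POOL` variable that previously opted in is only needed to opt out. A sketch, assuming the usual clusterctl configuration file location (typically `$HOME/.config/cluster-api/clusterctl.yaml`):
+
+```yaml
+# Only required to opt out now that the MachinePool feature gate defaults to true.
+EXP_MACHINE_POOL: "false"
+```
+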
+More details about the release + +:warning: **RELEASE CANDIDATE NOTES** :warning: +## 👌 Kubernetes version support + +- Management Cluster: v1.26.x -> v1.29.x +- Workload Cluster: v1.24.x -> v1.29.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.6.0 +## :chart_with_upwards_trend: Overview +- 323 new commits merged +- 5 breaking changes :warning: +- 12 feature additions ✨ +- 42 bugs fixed 🐛 + +## :memo: Proposals +- MachinePool: Update MachinePool Machines proposal with diagrams (#9664) + +## :warning: Breaking Changes +- API: Remove v1alpha4 API Version (#9939) +- Dependency: Bump to controller-runtime v0.17.0 & k8s.io v0.29 (#9964) +- Logging: Make event recorder names consistent for KCP and topology controller (#9862) +- MachinePool: Set MachinePool feature flag default to true + Beta (#10141) +- KCP/MachineSet: Objects generated by KCP, MachineSets and MachinePools will now consistently use machine name (#9833) + +## :sparkles: New Features +- API: Enable kubeadm feature gates mutation (#10154) +- clusterctl: Add k0smotron provider (#9991) +- clusterctl: Add public function to create new CRD migrator (#10075) +- Control-plane: Allow mutation of all fields that should be mutable (#9871) +- MachineDeployment: Add MachineSetReady condition to MachineDeployment (#9262) +- MachineDeployment: Taint nodes with PreferNoSchedule during rollouts (#10223) +- MachineHealthCheck: implement annotation to manually mark machines for remediation (#10202) +- Release: Add utility to create git issues on provider repo (#9110) +- Release: Add dependencies section to release notes tool (#10043) +- Release: Add release notes expander functionality (#10091) +- Testing: Resolve release markers (#9265) +- Testing: Prepare quickstart, capd and tests for the new release including kind bump (#9872) + +## :bug: Bug Fixes +- API: Use ptr instead of own implementation (#10276) +- API/e2e: Restore v1alpha3/v1alpha4 conversion to fix SSA issue & add e2e test coverage (#10147) +- CABPK: Only refresh bootstrap token if needed, requeue in all cases where node hasn't joined yet (#9229) +- CAPD: Fix ignition to also set the kube-proxy configuration to skip setting sysctls (#9894) +- CAPD: Remove --enable-hostpath-provisioner flag (#10271) +- CAPD: Remove duplicate fix for btrfs/zfs support (#8376) +- ClusterCacheTracker: Fix ClusterCacheTracker memory leak (#9543) +- ClusterCacheTracker: Use RequeueAfter instead of immediate requeue on ErrClusterLocked to not have exponentially increasing requeue time (#9810) +- ClusterClass: Fix for TestServerSideApplyWithDefaulting (#10307) +- ClusterClass: Improve handling of topology orphaned objects (#10277) +- clusterctl: Move handlePlugins function call out of init to allow debugging tests (#10200) +- clusterctl: Validate no objects exist from CRDs before deleting them (#9808) +- ClusterResourceSet: Requeue after 1 minute if ErrClusterLocked got hit (#9777) +- Control-plane: KCP should defer remediation when a control plane machine is still provisioning (#9734) +- Devtools: Show default cluster-template (#9820) +- e2e: Add wait for MachineList to be available (#10301) +- e2e: fix unset GINKGO_FOCUS variable (#10085) +- e2e: Fix clusterctl upgrade e2e tests (enable CRS) (#10340) +- e2e: Make MachinePools and MachineDeployments optional in ApplyClusterTemplateAndWait (#9960) +- e2e: Re-introduce exclude capi-webhook-system to fix test flake (#10157) +- e2e: Calculate correct worker count in 
clusterctl upgrade test (#9892) +- e2e: Fix finalizers test to not only rely on namespaced name (#9891) +- e2e: retry GetOwnerGraph in owner references test on certificate errors (#10201) +- IPAM: Fix webhooks using mixed api versions (#9861) +- KCP: Skip checking `clusterConfiguration.dns` fields when KCP checking MachineNeedRollout (#9857) +- Logging: Improve log k/v pairs and improve/drop a few log lines (#9813) +- Machine: Bubble up machine drain condition in `MachinesReadyCondition` (#9355) +- Machine: Watch external objects for machine before deleting (#10041) +- MachinePool: Fix TestReconcileMachinePoolScaleToFromZero flakes (#9745) +- Release: Fix Community meeting area formatting in release notes (#9784) +- Release: Fix defaulting logic in release notes tool (#9958) +- Release: Fix documentation area comparison in release notes (#9769) +- Release: Fix kubeadm bootstrap prefix in release notes (#9814) +- Release: Fix wrong branch name display for weekly update script (#9918) +- Runtime SDK: Fix ClusterClass variables status & RuntimeExtension and add test coverage (#10337) +- Runtime SDK: Use keys/values structured log interface (#9998) +- Testing: Fix using correct testing.T when creating gomega object (#10342) +- Testing: Revert "Watch for Cluster resources in topology MD controller" (#9985) +- Testing: Reverting ginkgo.show-node-events to ginkgo.progress (#10214) +- Testing: fix flaky test TestPatch/Test patch with Machine (#9914) +- Testing: wait for topology to get rolled out before continuing with scaling checks (#9819) +- util: Add tests and update provider_issues.go (#10264) + +## :seedling: Others +- API: Stop relying on GVK being set on regular typed objects (#9956) +- Bootstrap: Add MachinePool test for switching bootstrap config to another ready/non-ready object with different name (#9616) +- CABPK: Add pod metadata to capbk manager (#10208) +- CAPD: make docker machine bootstrap timeout configurable (#9952) +- CAPD: Remove requeues in DockerMachinePool (#9725) +- CAPD: Support configuring ExtraPortMappings for the kind cluster (#10046) +- CAPIM: Add namespaces to the list of resources handled by the in-memory API server (#10297) +- CAPIM: Allow using different resource group and listener name with the in memory server (#10096) +- CAPIM: Make in memory API server more tolerant when starting (#10211) +- CAPIM: Make in memory runtime and server accessible from outside (#9986) +- CAPIM: use port only to identify the wcl to make port-forward… (#10245) +- CI: Add fail fast to DumpResourcesForCluster in case of no route to host (#10204) +- CI: Added go directive test (#10261) +- CI: Bump conversion-gen to v0.29.0 (#10012) +- CI: Bump go-apidiff to v0.8.2 (#10011) +- CI: Bump govulncheck to v1.0.4 (#10274) +- CI: Bump kpromo to v4.0.5 (#10140) +- CI: Bump kubebuilder envtest to 1.29.0 (#10013) +- CI: DumpResourcesForCluster should fail fast for i/o errors (#10238) +- CI: Ensure build images contain the correct binary and are built for the correct architecture (#9932) +- CI: Fix Make target generate-go-openapi (#10161) +- CI: Fix TestPatchNode flake (#10287) +- CI: Hack/prowjob-gen skip creating empty files (#10022) +- CI: implement generator for prowjobs (#9937) +- CI: bump dependencies (#10236) +- CI: bump WorkloadKubernetesVersion for v1.6 clusterctl upgrade test (#10017) +- CI: adjust capd dockerfile so the binary exposes the package in the built binaries path variable (#10030) +- CI: Update GH actions to work with new release-1.6 branch (#9708) +- ClusterClass: Add ClusterClass
variables metadata (#10308) +- ClusterClass: Add unit tests for MachinePools for webhooks and cc controller (#10055) +- ClusterClass: Add unit tests for MachinePools in topology/scope package (#10052) +- ClusterClass: Implement topology validations for topology kubernetes version upgrades (#10063) +- ClusterClass: use the alias for ClusterCacheTrackerReader instead of the internal reference (#10309) +- clusterctl: Add 0 default to worker-machine-count help (#10203) +- clusterctl: Add completion for fish shell (#9950) +- clusterctl: Add hivelocity infra provider to clusterctl (#10168) +- clusterctl: Add in-cluster ipam provider (#8811) +- clusterctl: Add Proxmox provider quickstart (#9798) +- clusterctl: Better verbose logging on override path (#10180) +- clusterctl: Bump cert-manager to 1.14.2 (#10126) +- clusterctl: Bump cert-manager to 1.14.4 (#10267) +- clusterctl: Bump cert-manager to v1.14.1 (#10113) +- clusterctl: Clarify rules for adding new clusterctl default providers (#9975) +- clusterctl: Deprecate clusterctl alpha topology plan (#10139) +- clusterctl: Replace context.TODO() from clusterctl proxy.go (#9776) +- Community meeting: Move CecileRobertMichon to emeritus (#10042) +- Community meeting: Move ykakarap to emeritus approvers (#10000) +- Community meeting: Promote chrischdi to Cluster API maintainer (#9997) +- Dependency: Bump `github.com/docker/docker` from 24.0.7 -> 25.0.0 (#10057) +- Dependency: Bump `golangci-lint` to v1.56.1 (#10124) +- Dependency: Bump controller-runtime to v0.17.1 (#10131) +- Dependency: Bump controller-runtime to v0.17.2 (#10162) +- Dependency: Bump controller-tools to v0.14 (#9987) +- Dependency: Bump crypto dep version (#9938) +- Dependency: Bump github.com/onsi/ginkgo/v2 from 2.16.0 to 2.17.0 (#10284) +- Dependency: Bump github.com/onsi/gomega from 1.31.1 to 1.32.0 (#10285) +- Dependency: Bump Go to 1.21.5 (#9900) +- Dependency: Bump go version to 1.21.8 (#10235) +- Dependency: Bump kind version to v0.22.0 (#10094) +- Dependency: Bump protobuf to v1.33.0 to address CVEs (#10248) +- Dependency: Bump the kubernetes group to v0.28.5 (#9933) +- Dependency: Bump the kubernetes group with 8 updates (#10286) +- Dependency: Bump to Go 1.20.12 (#9840) +- Dependency: bump github.com/cloudflare/circl to v1.3.7 (#10001) +- Devtools: Add missing folder to clean-tilt make target (#9934) +- Devtools: Allow tilt provider with pre-built images (#10244) +- Devtools: Explicitly set golangci config for sub modules (#9821) +- Devtools: Fix variables names in Tiltfile (#9811) +- Devtools: Implement privileged namespace security policy update for tilt-prepare (#10178) +- Devtools: Simplify testing nightly builds with clusterctl (#10018) +- Devtools: Small improvements to tilt (#9936) +- e2e: Add conformance e2e tests (#10060) +- e2e: Add DeepCopy method for E2EConfig (#9988) +- e2e: Add PostCreateNamespace hook to E2E tests (#10067) +- e2e: Add test util to resolve Kubernetes versions (#9995) +- e2e: Allow to specify cluster name for E2E quick-start (#10088) +- e2e: Bump cluster-autoscaler to v1.29.0 (#9942) +- e2e: Drop duplicated scale test (#9968) +- e2e: Fix ci-latest test to actually use ci latest (#10080) +- e2e: Fix CRS e2e helper with multiple bindings (#10191) +- e2e: Improve output of exec.KubectlApply (#9737) +- e2e: Make etcd and CoreDNS optional in upgrade and self-hosted tests (#9963) +- e2e: Test/framework add WatchDaemonSetLogsByLabelSelector method (#9984) +- e2e: expose CopyAndAmendClusterctlConfig function (#10086) +- e2e: combine Finalizers tests to
default quick start tests (#10039) +- e2e: Use --wait-providers in test framework InitWithBinary func (#10149) +- e2e: Use manager in test extension (#10097) +- e2e: Add scale testing for upgrades (#9077) +- Machine: Be more explicit when skipping node deletion (#10137) +- Machine: Mark Machine healthy condition as unknown if we can't list wl nodes (#9864) +- MachineDeployment: Set revision on machinedeployment and patch via defer (#10160) +- MachinePool: Use SSA Patch to create machines in MP controller (#9791) +- MachineSet: improve replica defaulting for autoscaler (#9649) +- Dependency/Security: Bump github.com/docker/docker from 25.0.4+incompatible to 25.0.5+incompatible (#10302) +- KCP/MachineSet: remove hardcoded WithStepCounterIf(false) (#9772) +- Release: Add integration test for release notes tool (#9617) +- Release: Bump kubebuilder-release-tools to v0.4.3 (#9818) +- Release: Changelog entry to include cert-manager to v1.14.2 (#10188) +- Release: Determine release type from tag to also handle beta releases (#10324) +- Release: Fix dockerfile for clusterctl build (#10058) +- Release: Fix grammar in release script (#9981) +- Release: Improve release-staging build (#10095) +- Release: Improve weekly PR update generation script and documentation (#10092) +- Release: Make release notes tool not dependent on local git (#9618) +- Release: Prep main branch for v1.7 development (#9799) +- Release: Provide faster support for new k8s minor releases (#9971) +- Release: Read in dockerfiles from stdin (#9990) +- Release: Release v1.5.7 (#10251) +- Release: Release v1.6.3 (#10252) +- Release: Swap in new 1.7 RT members in OWNERS (#9855) +- Runtime SDK: Add more helper functions in topologymutation variable.go to help unmarshal variables (#9670) +- Runtime SDK: Enable integration tests of RuntimeExtensions (#10330) +- Testing: Drop policy v1beta1 API support in the testing framework (#10158) +- Testing: Drop unused runtime features for the in-memory provider (#9778) +- Testing: Fix typo in test framework (#9873) +- Testing: Fixing kubetest warnings for deprecations (#10172) +- Testing: Print conformance image used in kubetest (#10076) +- Testing: Remove k8s.io/utils/pointer from repository (#9836) +- Testing: Small improvements to the inmemory api server (#9935) +- Testing: add dynamic finalizer assertions for e2e framework (#9970) +- Testing: Watch for Cluster resources in topology MachineSet & MachineDeployment controllers (#10029) +- Testing: Watch for Cluster resources in topology MD controller (#9865) +- util: Improve patch helper error handling (#9946) +- util: Use min/max funcs from Go SDK (#9945) + +:book: Additionally, there have been 50 contributions to our documentation and book.
(#10005, #10031, #10040, #10061, #10066, #10068, #10084, #10099, #10100, #10115, #10122, #10170, #10174, #10194, #10239, #10257, #10268, #10288, #10289, #10323, #10329, #10334, #9585, #9640, #9767, #9771, #9779, #9782, #9786, #9794, #9797, #9801, #9817, #9829, #9831, #9838, #9856, #9866, #9867, #9868, #9876, #9896, #9897, #9908, #9941, #9949, #9957, #9961, #9972, #9993) + +## Dependencies + +### Added +- github.com/matttproud/golang_protobuf_extensions/v2: [v2.0.0](https://github.com/matttproud/golang_protobuf_extensions/tree/v2.0.0) + +### Changed +- cloud.google.com/go/accessapproval: v1.7.1 → v1.7.4 +- cloud.google.com/go/accesscontextmanager: v1.8.1 → v1.8.4 +- cloud.google.com/go/aiplatform: v1.48.0 → v1.52.0 +- cloud.google.com/go/analytics: v0.21.3 → v0.21.6 +- cloud.google.com/go/apigateway: v1.6.1 → v1.6.4 +- cloud.google.com/go/apigeeconnect: v1.6.1 → v1.6.4 +- cloud.google.com/go/apigeeregistry: v0.7.1 → v0.8.2 +- cloud.google.com/go/appengine: v1.8.1 → v1.8.4 +- cloud.google.com/go/area120: v0.8.1 → v0.8.4 +- cloud.google.com/go/artifactregistry: v1.14.1 → v1.14.6 +- cloud.google.com/go/asset: v1.14.1 → v1.15.3 +- cloud.google.com/go/assuredworkloads: v1.11.1 → v1.11.4 +- cloud.google.com/go/automl: v1.13.1 → v1.13.4 +- cloud.google.com/go/baremetalsolution: v1.1.1 → v1.2.3 +- cloud.google.com/go/batch: v1.3.1 → v1.6.3 +- cloud.google.com/go/beyondcorp: v1.0.0 → v1.0.3 +- cloud.google.com/go/bigquery: v1.53.0 → v1.57.1 +- cloud.google.com/go/billing: v1.16.0 → v1.17.4 +- cloud.google.com/go/binaryauthorization: v1.6.1 → v1.7.3 +- cloud.google.com/go/certificatemanager: v1.7.1 → v1.7.4 +- cloud.google.com/go/channel: v1.16.0 → v1.17.3 +- cloud.google.com/go/cloudbuild: v1.13.0 → v1.14.3 +- cloud.google.com/go/clouddms: v1.6.1 → v1.7.3 +- cloud.google.com/go/cloudtasks: v1.12.1 → v1.12.4 +- cloud.google.com/go/compute: v1.23.0 → v1.23.3 +- cloud.google.com/go/contactcenterinsights: v1.10.0 → v1.11.3 +- cloud.google.com/go/container: v1.24.0 → v1.27.1 +- cloud.google.com/go/containeranalysis: v0.10.1 → v0.11.3 +- cloud.google.com/go/datacatalog: v1.16.0 → v1.18.3 +- cloud.google.com/go/dataflow: v0.9.1 → v0.9.4 +- cloud.google.com/go/dataform: v0.8.1 → v0.9.1 +- cloud.google.com/go/datafusion: v1.7.1 → v1.7.4 +- cloud.google.com/go/datalabeling: v0.8.1 → v0.8.4 +- cloud.google.com/go/dataplex: v1.9.0 → v1.11.1 +- cloud.google.com/go/dataproc/v2: v2.0.1 → v2.2.3 +- cloud.google.com/go/dataqna: v0.8.1 → v0.8.4 +- cloud.google.com/go/datastore: v1.13.0 → v1.15.0 +- cloud.google.com/go/datastream: v1.10.0 → v1.10.3 +- cloud.google.com/go/deploy: v1.13.0 → v1.14.2 +- cloud.google.com/go/dialogflow: v1.40.0 → v1.44.3 +- cloud.google.com/go/dlp: v1.10.1 → v1.11.1 +- cloud.google.com/go/documentai: v1.22.0 → v1.23.5 +- cloud.google.com/go/domains: v0.9.1 → v0.9.4 +- cloud.google.com/go/edgecontainer: v1.1.1 → v1.1.4 +- cloud.google.com/go/essentialcontacts: v1.6.2 → v1.6.5 +- cloud.google.com/go/eventarc: v1.13.0 → v1.13.3 +- cloud.google.com/go/filestore: v1.7.1 → v1.7.4 +- cloud.google.com/go/firestore: v1.13.0 → v1.14.0 +- cloud.google.com/go/functions: v1.15.1 → v1.15.4 +- cloud.google.com/go/gkebackup: v1.3.0 → v1.3.4 +- cloud.google.com/go/gkeconnect: v0.8.1 → v0.8.4 +- cloud.google.com/go/gkehub: v0.14.1 → v0.14.4 +- cloud.google.com/go/gkemulticloud: v1.0.0 → v1.0.3 +- cloud.google.com/go/gsuiteaddons: v1.6.1 → v1.6.4 +- cloud.google.com/go/iam: v1.1.1 → v1.1.5 +- cloud.google.com/go/iap: v1.8.1 → v1.9.3 +- cloud.google.com/go/ids: v1.4.1 → v1.4.4 +- cloud.google.com/go/iot: 
v1.7.1 → v1.7.4 +- cloud.google.com/go/kms: v1.15.0 → v1.15.5 +- cloud.google.com/go/language: v1.10.1 → v1.12.2 +- cloud.google.com/go/lifesciences: v0.9.1 → v0.9.4 +- cloud.google.com/go/logging: v1.7.0 → v1.8.1 +- cloud.google.com/go/longrunning: v0.5.1 → v0.5.4 +- cloud.google.com/go/managedidentities: v1.6.1 → v1.6.4 +- cloud.google.com/go/maps: v1.4.0 → v1.6.1 +- cloud.google.com/go/mediatranslation: v0.8.1 → v0.8.4 +- cloud.google.com/go/memcache: v1.10.1 → v1.10.4 +- cloud.google.com/go/metastore: v1.12.0 → v1.13.3 +- cloud.google.com/go/monitoring: v1.15.1 → v1.16.3 +- cloud.google.com/go/networkconnectivity: v1.12.1 → v1.14.3 +- cloud.google.com/go/networkmanagement: v1.8.0 → v1.9.3 +- cloud.google.com/go/networksecurity: v0.9.1 → v0.9.4 +- cloud.google.com/go/notebooks: v1.9.1 → v1.11.2 +- cloud.google.com/go/optimization: v1.4.1 → v1.6.2 +- cloud.google.com/go/orchestration: v1.8.1 → v1.8.4 +- cloud.google.com/go/orgpolicy: v1.11.1 → v1.11.4 +- cloud.google.com/go/osconfig: v1.12.1 → v1.12.4 +- cloud.google.com/go/oslogin: v1.10.1 → v1.12.2 +- cloud.google.com/go/phishingprotection: v0.8.1 → v0.8.4 +- cloud.google.com/go/policytroubleshooter: v1.8.0 → v1.10.2 +- cloud.google.com/go/privatecatalog: v0.9.1 → v0.9.4 +- cloud.google.com/go/recaptchaenterprise/v2: v2.7.2 → v2.8.3 +- cloud.google.com/go/recommendationengine: v0.8.1 → v0.8.4 +- cloud.google.com/go/recommender: v1.10.1 → v1.11.3 +- cloud.google.com/go/redis: v1.13.1 → v1.14.1 +- cloud.google.com/go/resourcemanager: v1.9.1 → v1.9.4 +- cloud.google.com/go/resourcesettings: v1.6.1 → v1.6.4 +- cloud.google.com/go/retail: v1.14.1 → v1.14.4 +- cloud.google.com/go/run: v1.2.0 → v1.3.3 +- cloud.google.com/go/scheduler: v1.10.1 → v1.10.4 +- cloud.google.com/go/secretmanager: v1.11.1 → v1.11.4 +- cloud.google.com/go/security: v1.15.1 → v1.15.4 +- cloud.google.com/go/securitycenter: v1.23.0 → v1.24.2 +- cloud.google.com/go/servicedirectory: v1.11.0 → v1.11.3 +- cloud.google.com/go/shell: v1.7.1 → v1.7.4 +- cloud.google.com/go/spanner: v1.47.0 → v1.51.0 +- cloud.google.com/go/speech: v1.19.0 → v1.20.1 +- cloud.google.com/go/storage: v1.14.0 → v1.35.1 +- cloud.google.com/go/storagetransfer: v1.10.0 → v1.10.3 +- cloud.google.com/go/talent: v1.6.2 → v1.6.5 +- cloud.google.com/go/texttospeech: v1.7.1 → v1.7.4 +- cloud.google.com/go/tpu: v1.6.1 → v1.6.4 +- cloud.google.com/go/trace: v1.10.1 → v1.10.4 +- cloud.google.com/go/translate: v1.8.2 → v1.9.3 +- cloud.google.com/go/video: v1.19.0 → v1.20.3 +- cloud.google.com/go/videointelligence: v1.11.1 → v1.11.4 +- cloud.google.com/go/vision/v2: v2.7.2 → v2.7.5 +- cloud.google.com/go/vmmigration: v1.7.1 → v1.7.4 +- cloud.google.com/go/vmwareengine: v1.0.0 → v1.0.3 +- cloud.google.com/go/vpcaccess: v1.7.1 → v1.7.4 +- cloud.google.com/go/webrisk: v1.9.1 → v1.9.4 +- cloud.google.com/go/websecurityscanner: v1.6.1 → v1.6.4 +- cloud.google.com/go/workflows: v1.11.1 → v1.12.3 +- cloud.google.com/go: v0.110.7 → v0.110.10 +- github.com/cloudflare/circl: [v1.3.3 → v1.3.7](https://github.com/cloudflare/circl/compare/v1.3.3...v1.3.7) +- github.com/evanphx/json-patch/v5: [v5.7.0 → v5.9.0](https://github.com/evanphx/json-patch/compare/v5.7.0...v5.9.0) +- github.com/evanphx/json-patch: [v5.6.0+incompatible → v5.7.0+incompatible](https://github.com/evanphx/json-patch/compare/v5.6.0...v5.7.0) +- github.com/frankban/quicktest: [v1.14.4 → v1.14.6](https://github.com/frankban/quicktest/compare/v1.14.4...v1.14.6) +- github.com/fsnotify/fsnotify: [v1.6.0 → 
v1.7.0](https://github.com/fsnotify/fsnotify/compare/v1.6.0...v1.7.0) +- github.com/go-logr/logr: [v1.3.0 → v1.4.1](https://github.com/go-logr/logr/compare/v1.3.0...v1.4.1) +- github.com/go-logr/zapr: [v1.2.4 → v1.3.0](https://github.com/go-logr/zapr/compare/v1.2.4...v1.3.0) +- github.com/golang/mock: [v1.4.4 → v1.4.0](https://github.com/golang/mock/compare/v1.4.4...v1.4.0) +- github.com/golang/protobuf: [v1.5.3 → v1.5.4](https://github.com/golang/protobuf/compare/v1.5.3...v1.5.4) +- github.com/google/cel-go: [v0.16.1 → v0.17.7](https://github.com/google/cel-go/compare/v0.16.1...v0.17.7) +- github.com/google/uuid: [v1.3.1 → v1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) +- github.com/googleapis/enterprise-certificate-proxy: [v0.3.1 → v0.3.2](https://github.com/googleapis/enterprise-certificate-proxy/compare/v0.3.1...v0.3.2) +- github.com/googleapis/google-cloud-go-testing: [bcd43fb → 1c9a4c6](https://github.com/googleapis/google-cloud-go-testing/compare/bcd43fb...1c9a4c6) +- github.com/gorilla/websocket: [v1.4.2 → v1.5.0](https://github.com/gorilla/websocket/compare/v1.4.2...v1.5.0) +- github.com/nats-io/nats.go: [v1.30.2 → v1.31.0](https://github.com/nats-io/nats.go/compare/v1.30.2...v1.31.0) +- github.com/nats-io/nkeys: [v0.4.5 → v0.4.6](https://github.com/nats-io/nkeys/compare/v0.4.5...v0.4.6) +- github.com/onsi/ginkgo/v2: [v2.13.1 → v2.17.1](https://github.com/onsi/ginkgo/compare/v2.13.1...v2.17.1) +- github.com/onsi/gomega: [v1.30.0 → v1.32.0](https://github.com/onsi/gomega/compare/v1.30.0...v1.32.0) +- github.com/pkg/sftp: [v1.13.1 → v1.13.6](https://github.com/pkg/sftp/compare/v1.13.1...v1.13.6) +- github.com/prometheus/client_golang: [v1.17.0 → v1.18.0](https://github.com/prometheus/client_golang/compare/v1.17.0...v1.18.0) +- github.com/prometheus/client_model: [9a2bf30 → v0.5.0](https://github.com/prometheus/client_model/compare/9a2bf30...v0.5.0) +- github.com/prometheus/common: [v0.44.0 → v0.45.0](https://github.com/prometheus/common/compare/v0.44.0...v0.45.0) +- github.com/prometheus/procfs: [v0.11.1 → v0.12.0](https://github.com/prometheus/procfs/compare/v0.11.1...v0.12.0) +- github.com/sagikazarmark/crypt: [v0.15.0 → v0.17.0](https://github.com/sagikazarmark/crypt/compare/v0.15.0...v0.17.0) +- github.com/sagikazarmark/locafero: [v0.3.0 → v0.4.0](https://github.com/sagikazarmark/locafero/compare/v0.3.0...v0.4.0) +- github.com/spf13/afero: [v1.10.0 → v1.11.0](https://github.com/spf13/afero/compare/v1.10.0...v1.11.0) +- github.com/spf13/cast: [v1.5.1 → v1.6.0](https://github.com/spf13/cast/compare/v1.5.1...v1.6.0) +- github.com/spf13/viper: [v1.17.0 → v1.18.2](https://github.com/spf13/viper/compare/v1.17.0...v1.18.2) +- go.etcd.io/bbolt: v1.3.7 → v1.3.8 +- go.etcd.io/etcd/api/v3: v3.5.10 → v3.5.13 +- go.etcd.io/etcd/client/pkg/v3: v3.5.10 → v3.5.13 +- go.etcd.io/etcd/client/v2: v2.305.9 → v2.305.10 +- go.etcd.io/etcd/client/v3: v3.5.10 → v3.5.13 +- go.etcd.io/etcd/pkg/v3: v3.5.9 → v3.5.10 +- go.etcd.io/etcd/raft/v3: v3.5.9 → v3.5.10 +- go.etcd.io/etcd/server/v3: v3.5.9 → v3.5.10 +- go.uber.org/zap: v1.25.0 → v1.26.0 +- golang.org/x/crypto: v0.15.0 → v0.21.0 +- golang.org/x/lint: 83fdc39 → 910be7a +- golang.org/x/mod: v0.13.0 → v0.14.0 +- golang.org/x/net: v0.18.0 → v0.22.0 +- golang.org/x/oauth2: v0.14.0 → v0.18.0 +- golang.org/x/sync: v0.4.0 → v0.6.0 +- golang.org/x/sys: v0.14.0 → v0.18.0 +- golang.org/x/term: v0.14.0 → v0.18.0 +- golang.org/x/time: v0.3.0 → v0.5.0 +- golang.org/x/tools: v0.14.0 → v0.17.0 +- google.golang.org/api: v0.143.0 → v0.153.0 +- 
google.golang.org/genproto/googleapis/api: 007df8e → bbf56f3 +- google.golang.org/genproto/googleapis/rpc: e6e6cda → 83a465c +- google.golang.org/genproto: 007df8e → bbf56f3 +- google.golang.org/protobuf: v1.31.0 → v1.33.0 +- honnef.co/go/tools: v0.0.1-2020.1.4 → v0.0.1-2019.2.3 +- k8s.io/api: v0.28.4 → v0.29.3 +- k8s.io/apiextensions-apiserver: v0.28.4 → v0.29.3 +- k8s.io/apimachinery: v0.28.4 → v0.29.3 +- k8s.io/apiserver: v0.28.4 → v0.29.3 +- k8s.io/cli-runtime: v0.28.4 → v0.29.3 +- k8s.io/client-go: v0.28.4 → v0.29.3 +- k8s.io/cluster-bootstrap: v0.28.4 → v0.29.3 +- k8s.io/code-generator: v0.28.4 → v0.29.3 +- k8s.io/component-base: v0.28.4 → v0.29.3 +- k8s.io/component-helpers: v0.28.4 → v0.29.3 +- k8s.io/gengo: c0856e2 → 9cce18d +- k8s.io/klog/v2: v2.100.1 → v2.110.1 +- k8s.io/kms: v0.28.4 → v0.29.3 +- k8s.io/kube-openapi: 2695361 → 2dd684a +- k8s.io/kubectl: v0.28.4 → v0.29.3 +- k8s.io/metrics: v0.28.4 → v0.29.3 +- k8s.io/utils: d93618c → b307cd5 +- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.1.2 → v0.28.0 +- sigs.k8s.io/controller-runtime: v0.16.3 → v0.17.2 +- sigs.k8s.io/structured-merge-diff/v4: v4.2.3 → v4.4.1 + +### Removed +- github.com/benbjohnson/clock: [v1.3.0](https://github.com/benbjohnson/clock/tree/v1.3.0) +- github.com/docker/distribution: [v2.8.3+incompatible](https://github.com/docker/distribution/tree/v2.8.3) +- github.com/google/martian/v3: [v3.1.0](https://github.com/google/martian/tree/v3.1.0) +- github.com/minio/highwayhash: [v1.0.2](https://github.com/minio/highwayhash/tree/v1.0.2) +- github.com/nats-io/jwt/v2: [v2.4.1](https://github.com/nats-io/jwt/tree/v2.4.1) +- go.opentelemetry.io/otel/exporters/otlp/internal/retry: v1.10.0 + +
+
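+As a pointer for the manual remediation annotation listed above (#10202): annotating a Machine marks it as needing remediation, so its MachineHealthCheck replaces it without waiting for a health check to fail. A minimal sketch (the Machine name is a placeholder, and the annotation key reflects our reading of the change, so verify it against the book before relying on it):
+
+```yaml
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Machine
+metadata:
+  name: worker-md-0-abc12  # placeholder Machine name
+  annotations:
+    # the presence of the key is what triggers remediation; an empty value is enough
+    cluster.x-k8s.io/remediate-machine: ""
+```
+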
+ +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.7.0-rc.1.md b/CHANGELOG/v1.7.0-rc.1.md new file mode 100644 index 000000000000..af1e38e4f57a --- /dev/null +++ b/CHANGELOG/v1.7.0-rc.1.md @@ -0,0 +1,464 @@ +🚨 This is a RELEASE CANDIDATE. Use it only for testing purposes. If you find any bugs, file an [issue](https://github.com/kubernetes-sigs/cluster-api/issues/new). + +## Highlights + +* Enable kubeadm feature gates mutation +* Add public function to create new CRD migrator +* Add MachineSetReady condition to MachineDeployment +* Set MachinePool feature flag default to true + Beta + +## Changes since v1.7.0-rc.0 +## :chart_with_upwards_trend: Overview +- 8 new commits merged +- 2 bugs fixed 🐛 + +## :bug: Bug Fixes +- util: Checking cert's keypair for nil before accessing to avoid panics (#10368) +- util: recover gvk after scheme.Convert (#10409) + +## :seedling: Others +- CI: Add more templating func to prowjob-gen (#10403) +- CI: Change base branch for link checker (#10365) +- Dependency: Bump golang to v1.21.9 and golang.org/x/net to mitigate CVE-2023-45288 (#10378) +- Dependency: Bump sigs.k8s.io/controller-runtime from 0.17.2 to 0.17.3 (#10406) +- Release: also detect alpha releases as pre releases (#10379) + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +- golang.org/x/net: v0.22.0 → v0.23.0 +- sigs.k8s.io/controller-runtime: v0.17.2 → v0.17.3 + +### Removed +_Nothing has changed._ + +
+More details about the release + +:warning: **RELEASE CANDIDATE NOTES** :warning: +## 👌 Kubernetes version support + +- Management Cluster: v1.26.x -> v1.29.x +- Workload Cluster: v1.24.x -> v1.29.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.6.0 +## :chart_with_upwards_trend: Overview +- 333 new commits merged +- 5 breaking changes :warning: +- 12 feature additions ✨ +- 44 bugs fixed 🐛 + +## :memo: Proposals +- MachinePool: Update MachinePool Machines proposal with diagrams (#9664) + +## :warning: Breaking Changes +- API: Remove v1alpha4 API Version (#9939) +- Dependency: Bump to controller-runtime v0.17.0 & k8s.io v0.29 (#9964) +- Logging: Make event recorder names consistent for KCP and topology controller (#9862) +- MachinePool: Set MachinePool feature flag default to true + Beta (#10141) +- KCP/MachineSet: Objects generated by KCP, MachineSets and MachinePools will now consistently use machine name (#9833) + +## :sparkles: New Features +- API: Enable kubeadm feature gates mutation (#10154) +- clusterctl: Add k0smotron provider (#9991) +- clusterctl: Add public function to create new CRD migrator (#10075) +- Control-plane: Allow mutation of all fields that should be mutable (#9871) +- MachineDeployment: Add MachineSetReady condition to MachineDeployment (#9262) +- MachineDeployment: Taint nodes with PreferNoSchedule during rollouts (#10223) +- MachineHealthCheck: implement annotation to manually mark machines for remediation (#10202) +- Release: Add dependencies section to release notes tool (#10043) +- Release: Add release notes expander functionality (#10091) +- Release: Add utility to create git issues on provider repo (#9110) +- Testing: Resolve release markers (#9265) +- Testing: Prepare quickstart, capd and tests for the new release including kind bump (#9872) + +## :bug: Bug Fixes +- API: Use ptr instead of own implementation (#10276) +- API/e2e: Restore v1alpha3/v1alpha4 conversion to fix SSA issue & add e2e test coverage (#10147) +- CABPK: Only refresh bootstrap token if needed, requeue in all cases where node hasn't joined yet (#9229) +- CAPD: Fix ignition to also set the kube-proxy configuration to skip setting sysctls (#9894) +- CAPD: Remove --enable-hostpath-provisioner flag (#10271) +- CAPD: Remove duplicate fix for btrfs/zfs support (#8376) +- ClusterCacheTracker: Fix ClusterCacheTracker memory leak (#9543) +- ClusterCacheTracker: Use RequeueAfter instead of immediate requeue on ErrClusterLocked to not have exponentially increasing requeue time (#9810) +- ClusterClass: Fix for TestServerSideApplyWithDefaulting (#10307) +- ClusterClass: Improve handling of topology orphaned objects (#10277) +- clusterctl: Move handlePlugins function call out of init to allow debugging tests (#10200) +- clusterctl: Validate no objects exist from CRDs before deleting them (#9808) +- ClusterResourceSet: Requeue after 1 minute if ErrClusterLocked got hit (#9777) +- Control-plane: KCP should defer remediation when a control plane machine is still provisioning (#9734) +- Devtools: Show default cluster-template (#9820) +- e2e: Add wait for MachineList to be available (#10301) +- e2e: fix unset GINKGO_FOCUS variable (#10085) +- e2e: Fix clusterctl upgrade e2e tests (enable CRS) (#10340) +- e2e: Make MachinePools and MachineDeployments optional in ApplyClusterTemplateAndWait (#9960) +- e2e: Re-introduce exclude capi-webhook-system to fix test flake (#10157) +- e2e: Calculate correct worker count in 
clusterctl upgrade test (#9892) +- e2e: Fix finalizers test to not only rely on namespaced name (#9891) +- e2e: retry GetOwnerGraph in owner references test on certificate errors (#10201) +- IPAM: Fix webhooks using mixed api versions (#9861) +- KCP: Skip checking `clusterConfiguration.dns` fields when KCP checking MachineNeedRollout (#9857) +- Logging: Improve log k/v pairs and improve/drop a few log lines (#9813) +- Machine: Bubble up machine drain condition in `MachinesReadyCondition` (#9355) +- Machine: Watch external objects for machine before deleting (#10041) +- MachinePool: Fix TestReconcileMachinePoolScaleToFromZero flakes (#9745) +- Release: Fix Community meeting area formatting in release notes (#9784) +- Release: Fix defaulting logic in release notes tool (#9958) +- Release: Fix documentation area comparison in release notes (#9769) +- Release: Fix kubeadm bootstrap prefix in release notes (#9814) +- Release: Fix wrong branch name display for weekly update script (#9918) +- Runtime SDK: Fix ClusterClass variables status & RuntimeExtension and add test coverage (#10337) +- Runtime SDK: Use keys/values structured log interface (#9998) +- Testing: Fix using correct testing.T when creating gomega object (#10342) +- Testing: Revert "Watch for Cluster resources in topology MD controller" (#9985) +- Testing: Reverting ginkgo.show-node-events to ginkgo.progress (#10214) +- Testing: fix flaky test TestPatch/Test patch with Machine (#9914) +- Testing: wait for topology to get rolled out before continuing with scaling checks (#9819) +- util: Add tests and update provider_issues.go (#10264) +- util: Checking cert's keypair for nil before accessing to avoid panics (#10368) +- util: recover gvk after scheme.Convert (#10409) + +## :seedling: Others +- API: Stop relying on GVK being set on regular typed objects (#9956) +- Bootstrap: Add MachinePool test for switching bootstrap config to another ready/non-ready object with different name (#9616) +- CABPK: Add pod metadata to capbk manager (#10208) +- CAPD: make docker machine bootstrap timeout configurable (#9952) +- CAPD: Remove requeues in DockerMachinePool (#9725) +- CAPD: Support configuring ExtraPortMappings for the kind cluster (#10046) +- CAPIM: Add namespaces to the list of resources handled by the in-memory API server (#10297) +- CAPIM: Allow using different resource group and listener name with the in memory server (#10096) +- CAPIM: Make in memory API server more tolerant when starting (#10211) +- CAPIM: Make in memory runtime and server accessible from outside (#9986) +- CAPIM: use port only to identify the wcl to make port-forward… (#10245) +- CI: Add fail fast to DumpResourcesForCluster in case of no route to host (#10204) +- CI: Add more templating func to prowjob-gen (#10403) +- CI: Added go directive test (#10261) +- CI: Bump conversion-gen to v0.29.0 (#10012) +- CI: Bump go-apidiff to v0.8.2 (#10011) +- CI: Bump govulncheck to v1.0.4 (#10274) +- CI: Bump kpromo to v4.0.5 (#10140) +- CI: Bump kubebuilder envtest to 1.29.0 (#10013) +- CI: Change base branch for link checker (#10365) +- CI: DumpResourcesForCluster should fail fast for i/o errors (#10238) +- CI: Ensure build images contain the correct binary and are built for the correct architecture (#9932) +- CI: Fix Make target generate-go-openapi (#10161) +- CI: Fix TestPatchNode flake (#10287) +- CI: Hack/prowjob-gen skip creating empty files (#10022) +- CI: implement generator for prowjobs (#9937) +- CI: Makefile: bump dependencies (#10236) +- CI: bump WorkloadKubernetesVersion for v1.6 clusterctl
upgrade test (#10017) +- CI: adjust capd dockerfile so the binary exposes the package in the built binaries path variable (#10030) +- CI: Update GH actions to work with new release-1.6 branch (#9708) +- ClusterClass: Add ClusterClass variables metadata (#10308) +- ClusterClass: Add unit tests for MachinePools for webhooks and cc controller (#10055) +- ClusterClass: Add unit tests for MachinePools in topology/scope package (#10052) +- ClusterClass: Implement topology validations for topology kubernetes version upgrades (#10063) +- ClusterClass: use the alias for ClusterCacheTrackerReader instead of the internal reference (#10309) +- clusterctl: Add 0 default to worker-machine-count help (#10203) +- clusterctl: Add completion for fish shell (#9950) +- clusterctl: Add hivelocity infra provider to clusterctl (#10168) +- clusterctl: Add in-cluster ipam provider (#8811) +- clusterctl: Add Proxmox provider quickstart (#9798) +- clusterctl: Better verbose logging on override path (#10180) +- clusterctl: Bump cert-manager to 1.14.2 (#10126) +- clusterctl: Bump cert-manager to 1.14.4 (#10267) +- clusterctl: Bump cert-manager to v1.14.1 (#10113) +- clusterctl: Clarify rules for adding new clusterctl default providers (#9975) +- clusterctl: Deprecate clusterctl alpha topology plan (#10139) +- clusterctl: Replace context.TODO() from clusterctl proxy.go (#9776) +- Community meeting: Move CecileRobertMichon to emeritus (#10042) +- Community meeting: Move ykakarap to emeritus approvers (#10000) +- Community meeting: Promote chrischdi to Cluster API maintainer (#9997) +- Dependency: Bump `github.com/docker/docker` from 24.0.7 -> 25.0.0 (#10057) +- Dependency: Bump `golangci-lint` to v1.56.1 (#10124) +- Dependency: Bump controller-runtime to v0.17.1 (#10131) +- Dependency: Bump controller-runtime to v0.17.2 (#10162) +- Dependency: Bump controller-tools to v0.14 (#9987) +- Dependency: Bump crypto dep version (#9938) +- Dependency: Bump github.com/onsi/ginkgo/v2 from 2.16.0 to 2.17.0 (#10284) +- Dependency: Bump github.com/onsi/gomega from 1.31.1 to 1.32.0 (#10285) +- Dependency: Bump Go to 1.21.5 (#9900) +- Dependency: Bump go version to 1.21.8 (#10235) +- Dependency: Bump golang to v1.21.9 and golang.org/x/net to mitigate CVE-2023-45288 (#10378) +- Dependency: Bump kind version to v0.22.0 (#10094) +- Dependency: Bump protobuf to v1.33.0 to address CVEs (#10248) +- Dependency: Bump github.com/cloudflare/circl to v1.3.7 (#10001) +- Dependency: Bump the kubernetes group to v0.28.5 (#9933) +- Dependency: Bump the kubernetes group with 8 updates (#10286) +- Dependency: Bump to Go 1.20.12 (#9840) +- Dependency/Security: Bump github.com/docker/docker from 25.0.4+incompatible to 25.0.5+incompatible (#10302) +- Devtools: Add missing folder to clean-tilt make target (#9934) +- Devtools: Allow tilt provider with pre-built images (#10244) +- Devtools: Explicitly set golangci config for sub modules (#9821) +- Devtools: Fix variables names in Tiltfile (#9811) +- Devtools: Implement privileged namespace security policy update for tilt-prepare (#10178) +- Devtools: Simplify testing nightly builds with clusterctl (#10018) +- Devtools: Small improvements to tilt (#9936) +- e2e: Add conformance e2e tests (#10060) +- e2e: Add DeepCopy method for E2EConfig (#9988) +- e2e: Add PostCreateNamespace hook to E2E tests (#10067) +- e2e: Add test util to resolve Kubernetes versions (#9995) +- e2e: Allow to specify cluster name for E2E quick-start (#10088) +- e2e: Bump
cluster-autoscaler to v1.29.0 (#9942) +- e2e: Drop duplicated scale test (#9968) +- e2e: Fix ci-latest test to actually use ci latest (#10080) +- e2e: Fix CRS e2e helper with multiple bindings (#10191) +- e2e: Improve output of exec.KubectlApply (#9737) +- e2e: Make etcd and CoreDNS optional in upgrade and self-hosted tests (#9963) +- e2e: add WatchDaemonSetLogsByLabelSelector method (#9984) +- e2e: expose CopyAndAmendClusterctlConfig function (#10086) +- e2e: combine Finalizers tests to default quick start tests (#10039) +- e2e: Use --wait-providers in test framework InitWithBinary func (#10149) +- e2e: Use manager in test extension (#10097) +- KCP/MachineSet: remove hardcoded WithStepCounterIf(false) (#9772) +- Machine: Be more explicit when skipping node deletion (#10137) +- Machine: Mark Machine healthy condition as unknown if we can't list wl nodes (#9864) +- MachineDeployment: Set revision on machinedeployment and patch via defer (#10160) +- MachinePool: Use SSA Patch to create machines in MP controller (#9791) +- MachineSet: improve replica defaulting for autoscaler (#9649) +- Release: Add integration test for release notes tool (#9617) +- Release: Bump kubebuilder-release-tools to v0.4.3 (#9818) +- Release: Changelog entry to include cert-manager to v1.14.2 (#10188) +- Release: Determine release type from tag to also handle beta releases (#10324) +- Release: Fix dockerfile for clusterctl build (#10058) +- Release: Fix grammar in release script (#9981) +- Release: Improve release-staging build (#10095) +- Release: Improve weekly PR update generation script and documentation (#10092) +- Release: Make release notes tool not dependent on local git (#9618) +- Release: Prep main branch for v1.7 development (#9799) +- Release: Provide faster support for new k8s minor releases (#9971) +- Release: Read in dockerfiles from stdin (#9990) +- Release: also detect alpha releases as pre releases (#10379) +- Release: Release v1.5.7 (#10251) +- Release: Release v1.6.3 (#10252) +- Release: Swap in new 1.7 RT members in OWNERS (#9855) +- Runtime SDK: Add more helper functions in topologymutation variable.go to help unmarshal variables (#9670) +- Runtime SDK: Enable integration tests of RuntimeExtensions (#10330) +- Testing: Add scale testing for upgrades (#9077) +- Testing: Drop policy v1beta1 API support in the testing framework (#10158) +- Testing: Drop unused runtime features for the in-memory provider (#9778) +- Testing: Fix typo in test framework (#9873) +- Testing: Fixing kubetest warnings for deprecations (#10172) +- Testing: Print conformance image used in kubetest (#10076) +- Testing: Remove k8s.io/utils/pointer from repository (#9836) +- Testing: Small improvements to the inmemory api server (#9935) +- Testing: add dynamic finalizer assertions for e2e framework (#9970) +- Testing: Watch for Cluster resources in topology MachineSet & MachineDeployment controllers (#10029) +- Testing: Watch for Cluster resources in topology MD controller (#9865) +- util: Improve patch helper error handling (#9946) +- util: Use min/max funcs from Go SDK (#9945) + +:book: Additionally, there have been 52 contributions to our documentation and book.
(#10005, #10031, #10040, #10061, #10066, #10068, #10084, #10099, #10100, #10115, #10122, #10170, #10174, #10194, #10239, #10257, #10268, #10288, #10289, #10323, #10329, #10334, #10381, #10393, #9585, #9640, #9767, #9771, #9779, #9782, #9786, #9794, #9797, #9801, #9817, #9829, #9831, #9838, #9856, #9866, #9867, #9868, #9876, #9896, #9897, #9908, #9941, #9949, #9957, #9961, #9972, #9993) + +## Dependencies + +### Added +- github.com/matttproud/golang_protobuf_extensions/v2: [v2.0.0](https://github.com/matttproud/golang_protobuf_extensions/tree/v2.0.0) + +### Changed +- cloud.google.com/go/accessapproval: v1.7.1 → v1.7.4 +- cloud.google.com/go/accesscontextmanager: v1.8.1 → v1.8.4 +- cloud.google.com/go/aiplatform: v1.48.0 → v1.52.0 +- cloud.google.com/go/analytics: v0.21.3 → v0.21.6 +- cloud.google.com/go/apigateway: v1.6.1 → v1.6.4 +- cloud.google.com/go/apigeeconnect: v1.6.1 → v1.6.4 +- cloud.google.com/go/apigeeregistry: v0.7.1 → v0.8.2 +- cloud.google.com/go/appengine: v1.8.1 → v1.8.4 +- cloud.google.com/go/area120: v0.8.1 → v0.8.4 +- cloud.google.com/go/artifactregistry: v1.14.1 → v1.14.6 +- cloud.google.com/go/asset: v1.14.1 → v1.15.3 +- cloud.google.com/go/assuredworkloads: v1.11.1 → v1.11.4 +- cloud.google.com/go/automl: v1.13.1 → v1.13.4 +- cloud.google.com/go/baremetalsolution: v1.1.1 → v1.2.3 +- cloud.google.com/go/batch: v1.3.1 → v1.6.3 +- cloud.google.com/go/beyondcorp: v1.0.0 → v1.0.3 +- cloud.google.com/go/bigquery: v1.53.0 → v1.57.1 +- cloud.google.com/go/billing: v1.16.0 → v1.17.4 +- cloud.google.com/go/binaryauthorization: v1.6.1 → v1.7.3 +- cloud.google.com/go/certificatemanager: v1.7.1 → v1.7.4 +- cloud.google.com/go/channel: v1.16.0 → v1.17.3 +- cloud.google.com/go/cloudbuild: v1.13.0 → v1.14.3 +- cloud.google.com/go/clouddms: v1.6.1 → v1.7.3 +- cloud.google.com/go/cloudtasks: v1.12.1 → v1.12.4 +- cloud.google.com/go/compute: v1.23.0 → v1.23.3 +- cloud.google.com/go/contactcenterinsights: v1.10.0 → v1.11.3 +- cloud.google.com/go/container: v1.24.0 → v1.27.1 +- cloud.google.com/go/containeranalysis: v0.10.1 → v0.11.3 +- cloud.google.com/go/datacatalog: v1.16.0 → v1.18.3 +- cloud.google.com/go/dataflow: v0.9.1 → v0.9.4 +- cloud.google.com/go/dataform: v0.8.1 → v0.9.1 +- cloud.google.com/go/datafusion: v1.7.1 → v1.7.4 +- cloud.google.com/go/datalabeling: v0.8.1 → v0.8.4 +- cloud.google.com/go/dataplex: v1.9.0 → v1.11.1 +- cloud.google.com/go/dataproc/v2: v2.0.1 → v2.2.3 +- cloud.google.com/go/dataqna: v0.8.1 → v0.8.4 +- cloud.google.com/go/datastore: v1.13.0 → v1.15.0 +- cloud.google.com/go/datastream: v1.10.0 → v1.10.3 +- cloud.google.com/go/deploy: v1.13.0 → v1.14.2 +- cloud.google.com/go/dialogflow: v1.40.0 → v1.44.3 +- cloud.google.com/go/dlp: v1.10.1 → v1.11.1 +- cloud.google.com/go/documentai: v1.22.0 → v1.23.5 +- cloud.google.com/go/domains: v0.9.1 → v0.9.4 +- cloud.google.com/go/edgecontainer: v1.1.1 → v1.1.4 +- cloud.google.com/go/essentialcontacts: v1.6.2 → v1.6.5 +- cloud.google.com/go/eventarc: v1.13.0 → v1.13.3 +- cloud.google.com/go/filestore: v1.7.1 → v1.7.4 +- cloud.google.com/go/firestore: v1.13.0 → v1.14.0 +- cloud.google.com/go/functions: v1.15.1 → v1.15.4 +- cloud.google.com/go/gkebackup: v1.3.0 → v1.3.4 +- cloud.google.com/go/gkeconnect: v0.8.1 → v0.8.4 +- cloud.google.com/go/gkehub: v0.14.1 → v0.14.4 +- cloud.google.com/go/gkemulticloud: v1.0.0 → v1.0.3 +- cloud.google.com/go/gsuiteaddons: v1.6.1 → v1.6.4 +- cloud.google.com/go/iam: v1.1.1 → v1.1.5 +- cloud.google.com/go/iap: v1.8.1 → v1.9.3 +- cloud.google.com/go/ids: v1.4.1 → v1.4.4 +- 
cloud.google.com/go/iot: v1.7.1 → v1.7.4 +- cloud.google.com/go/kms: v1.15.0 → v1.15.5 +- cloud.google.com/go/language: v1.10.1 → v1.12.2 +- cloud.google.com/go/lifesciences: v0.9.1 → v0.9.4 +- cloud.google.com/go/logging: v1.7.0 → v1.8.1 +- cloud.google.com/go/longrunning: v0.5.1 → v0.5.4 +- cloud.google.com/go/managedidentities: v1.6.1 → v1.6.4 +- cloud.google.com/go/maps: v1.4.0 → v1.6.1 +- cloud.google.com/go/mediatranslation: v0.8.1 → v0.8.4 +- cloud.google.com/go/memcache: v1.10.1 → v1.10.4 +- cloud.google.com/go/metastore: v1.12.0 → v1.13.3 +- cloud.google.com/go/monitoring: v1.15.1 → v1.16.3 +- cloud.google.com/go/networkconnectivity: v1.12.1 → v1.14.3 +- cloud.google.com/go/networkmanagement: v1.8.0 → v1.9.3 +- cloud.google.com/go/networksecurity: v0.9.1 → v0.9.4 +- cloud.google.com/go/notebooks: v1.9.1 → v1.11.2 +- cloud.google.com/go/optimization: v1.4.1 → v1.6.2 +- cloud.google.com/go/orchestration: v1.8.1 → v1.8.4 +- cloud.google.com/go/orgpolicy: v1.11.1 → v1.11.4 +- cloud.google.com/go/osconfig: v1.12.1 → v1.12.4 +- cloud.google.com/go/oslogin: v1.10.1 → v1.12.2 +- cloud.google.com/go/phishingprotection: v0.8.1 → v0.8.4 +- cloud.google.com/go/policytroubleshooter: v1.8.0 → v1.10.2 +- cloud.google.com/go/privatecatalog: v0.9.1 → v0.9.4 +- cloud.google.com/go/recaptchaenterprise/v2: v2.7.2 → v2.8.3 +- cloud.google.com/go/recommendationengine: v0.8.1 → v0.8.4 +- cloud.google.com/go/recommender: v1.10.1 → v1.11.3 +- cloud.google.com/go/redis: v1.13.1 → v1.14.1 +- cloud.google.com/go/resourcemanager: v1.9.1 → v1.9.4 +- cloud.google.com/go/resourcesettings: v1.6.1 → v1.6.4 +- cloud.google.com/go/retail: v1.14.1 → v1.14.4 +- cloud.google.com/go/run: v1.2.0 → v1.3.3 +- cloud.google.com/go/scheduler: v1.10.1 → v1.10.4 +- cloud.google.com/go/secretmanager: v1.11.1 → v1.11.4 +- cloud.google.com/go/security: v1.15.1 → v1.15.4 +- cloud.google.com/go/securitycenter: v1.23.0 → v1.24.2 +- cloud.google.com/go/servicedirectory: v1.11.0 → v1.11.3 +- cloud.google.com/go/shell: v1.7.1 → v1.7.4 +- cloud.google.com/go/spanner: v1.47.0 → v1.51.0 +- cloud.google.com/go/speech: v1.19.0 → v1.20.1 +- cloud.google.com/go/storage: v1.14.0 → v1.35.1 +- cloud.google.com/go/storagetransfer: v1.10.0 → v1.10.3 +- cloud.google.com/go/talent: v1.6.2 → v1.6.5 +- cloud.google.com/go/texttospeech: v1.7.1 → v1.7.4 +- cloud.google.com/go/tpu: v1.6.1 → v1.6.4 +- cloud.google.com/go/trace: v1.10.1 → v1.10.4 +- cloud.google.com/go/translate: v1.8.2 → v1.9.3 +- cloud.google.com/go/video: v1.19.0 → v1.20.3 +- cloud.google.com/go/videointelligence: v1.11.1 → v1.11.4 +- cloud.google.com/go/vision/v2: v2.7.2 → v2.7.5 +- cloud.google.com/go/vmmigration: v1.7.1 → v1.7.4 +- cloud.google.com/go/vmwareengine: v1.0.0 → v1.0.3 +- cloud.google.com/go/vpcaccess: v1.7.1 → v1.7.4 +- cloud.google.com/go/webrisk: v1.9.1 → v1.9.4 +- cloud.google.com/go/websecurityscanner: v1.6.1 → v1.6.4 +- cloud.google.com/go/workflows: v1.11.1 → v1.12.3 +- cloud.google.com/go: v0.110.7 → v0.110.10 +- github.com/cloudflare/circl: [v1.3.3 → v1.3.7](https://github.com/cloudflare/circl/compare/v1.3.3...v1.3.7) +- github.com/evanphx/json-patch/v5: [v5.7.0 → v5.9.0](https://github.com/evanphx/json-patch/compare/v5.7.0...v5.9.0) +- github.com/evanphx/json-patch: [v5.6.0+incompatible → v5.7.0+incompatible](https://github.com/evanphx/json-patch/compare/v5.6.0...v5.7.0) +- github.com/frankban/quicktest: [v1.14.4 → v1.14.6](https://github.com/frankban/quicktest/compare/v1.14.4...v1.14.6) +- github.com/fsnotify/fsnotify: [v1.6.0 → 
v1.7.0](https://github.com/fsnotify/fsnotify/compare/v1.6.0...v1.7.0) +- github.com/go-logr/logr: [v1.3.0 → v1.4.1](https://github.com/go-logr/logr/compare/v1.3.0...v1.4.1) +- github.com/go-logr/zapr: [v1.2.4 → v1.3.0](https://github.com/go-logr/zapr/compare/v1.2.4...v1.3.0) +- github.com/golang/mock: [v1.4.4 → v1.4.0](https://github.com/golang/mock/compare/v1.4.4...v1.4.0) +- github.com/golang/protobuf: [v1.5.3 → v1.5.4](https://github.com/golang/protobuf/compare/v1.5.3...v1.5.4) +- github.com/google/cel-go: [v0.16.1 → v0.17.7](https://github.com/google/cel-go/compare/v0.16.1...v0.17.7) +- github.com/google/uuid: [v1.3.1 → v1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) +- github.com/googleapis/enterprise-certificate-proxy: [v0.3.1 → v0.3.2](https://github.com/googleapis/enterprise-certificate-proxy/compare/v0.3.1...v0.3.2) +- github.com/googleapis/google-cloud-go-testing: [bcd43fb → 1c9a4c6](https://github.com/googleapis/google-cloud-go-testing/compare/bcd43fb...1c9a4c6) +- github.com/gorilla/websocket: [v1.4.2 → v1.5.0](https://github.com/gorilla/websocket/compare/v1.4.2...v1.5.0) +- github.com/nats-io/nats.go: [v1.30.2 → v1.31.0](https://github.com/nats-io/nats.go/compare/v1.30.2...v1.31.0) +- github.com/nats-io/nkeys: [v0.4.5 → v0.4.6](https://github.com/nats-io/nkeys/compare/v0.4.5...v0.4.6) +- github.com/onsi/ginkgo/v2: [v2.13.1 → v2.17.1](https://github.com/onsi/ginkgo/compare/v2.13.1...v2.17.1) +- github.com/onsi/gomega: [v1.30.0 → v1.32.0](https://github.com/onsi/gomega/compare/v1.30.0...v1.32.0) +- github.com/pkg/sftp: [v1.13.1 → v1.13.6](https://github.com/pkg/sftp/compare/v1.13.1...v1.13.6) +- github.com/prometheus/client_golang: [v1.17.0 → v1.18.0](https://github.com/prometheus/client_golang/compare/v1.17.0...v1.18.0) +- github.com/prometheus/client_model: [9a2bf30 → v0.5.0](https://github.com/prometheus/client_model/compare/9a2bf30...v0.5.0) +- github.com/prometheus/common: [v0.44.0 → v0.45.0](https://github.com/prometheus/common/compare/v0.44.0...v0.45.0) +- github.com/prometheus/procfs: [v0.11.1 → v0.12.0](https://github.com/prometheus/procfs/compare/v0.11.1...v0.12.0) +- github.com/sagikazarmark/crypt: [v0.15.0 → v0.17.0](https://github.com/sagikazarmark/crypt/compare/v0.15.0...v0.17.0) +- github.com/sagikazarmark/locafero: [v0.3.0 → v0.4.0](https://github.com/sagikazarmark/locafero/compare/v0.3.0...v0.4.0) +- github.com/spf13/afero: [v1.10.0 → v1.11.0](https://github.com/spf13/afero/compare/v1.10.0...v1.11.0) +- github.com/spf13/cast: [v1.5.1 → v1.6.0](https://github.com/spf13/cast/compare/v1.5.1...v1.6.0) +- github.com/spf13/viper: [v1.17.0 → v1.18.2](https://github.com/spf13/viper/compare/v1.17.0...v1.18.2) +- go.etcd.io/bbolt: v1.3.7 → v1.3.8 +- go.etcd.io/etcd/api/v3: v3.5.10 → v3.5.13 +- go.etcd.io/etcd/client/pkg/v3: v3.5.10 → v3.5.13 +- go.etcd.io/etcd/client/v2: v2.305.9 → v2.305.10 +- go.etcd.io/etcd/client/v3: v3.5.10 → v3.5.13 +- go.etcd.io/etcd/pkg/v3: v3.5.9 → v3.5.10 +- go.etcd.io/etcd/raft/v3: v3.5.9 → v3.5.10 +- go.etcd.io/etcd/server/v3: v3.5.9 → v3.5.10 +- go.uber.org/zap: v1.25.0 → v1.26.0 +- golang.org/x/crypto: v0.15.0 → v0.21.0 +- golang.org/x/lint: 83fdc39 → 910be7a +- golang.org/x/mod: v0.13.0 → v0.14.0 +- golang.org/x/net: v0.18.0 → v0.23.0 +- golang.org/x/oauth2: v0.14.0 → v0.18.0 +- golang.org/x/sync: v0.4.0 → v0.6.0 +- golang.org/x/sys: v0.14.0 → v0.18.0 +- golang.org/x/term: v0.14.0 → v0.18.0 +- golang.org/x/time: v0.3.0 → v0.5.0 +- golang.org/x/tools: v0.14.0 → v0.17.0 +- google.golang.org/api: v0.143.0 → v0.153.0 +- 
google.golang.org/genproto/googleapis/api: 007df8e → bbf56f3 +- google.golang.org/genproto/googleapis/rpc: e6e6cda → 83a465c +- google.golang.org/genproto: 007df8e → bbf56f3 +- google.golang.org/protobuf: v1.31.0 → v1.33.0 +- honnef.co/go/tools: v0.0.1-2020.1.4 → v0.0.1-2019.2.3 +- k8s.io/api: v0.28.4 → v0.29.3 +- k8s.io/apiextensions-apiserver: v0.28.4 → v0.29.3 +- k8s.io/apimachinery: v0.28.4 → v0.29.3 +- k8s.io/apiserver: v0.28.4 → v0.29.3 +- k8s.io/cli-runtime: v0.28.4 → v0.29.3 +- k8s.io/client-go: v0.28.4 → v0.29.3 +- k8s.io/cluster-bootstrap: v0.28.4 → v0.29.3 +- k8s.io/code-generator: v0.28.4 → v0.29.3 +- k8s.io/component-base: v0.28.4 → v0.29.3 +- k8s.io/component-helpers: v0.28.4 → v0.29.3 +- k8s.io/gengo: c0856e2 → 9cce18d +- k8s.io/klog/v2: v2.100.1 → v2.110.1 +- k8s.io/kms: v0.28.4 → v0.29.3 +- k8s.io/kube-openapi: 2695361 → 2dd684a +- k8s.io/kubectl: v0.28.4 → v0.29.3 +- k8s.io/metrics: v0.28.4 → v0.29.3 +- k8s.io/utils: d93618c → b307cd5 +- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.1.2 → v0.28.0 +- sigs.k8s.io/controller-runtime: v0.16.3 → v0.17.3 +- sigs.k8s.io/structured-merge-diff/v4: v4.2.3 → v4.4.1 + +### Removed +- github.com/benbjohnson/clock: [v1.3.0](https://github.com/benbjohnson/clock/tree/v1.3.0) +- github.com/docker/distribution: [v2.8.3+incompatible](https://github.com/docker/distribution/tree/v2.8.3) +- github.com/google/martian/v3: [v3.1.0](https://github.com/google/martian/tree/v3.1.0) +- github.com/minio/highwayhash: [v1.0.2](https://github.com/minio/highwayhash/tree/v1.0.2) +- github.com/nats-io/jwt/v2: [v2.4.1](https://github.com/nats-io/jwt/tree/v2.4.1) +- go.opentelemetry.io/otel/exporters/otlp/internal/retry: v1.10.0 + +
+
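+One note on the PreferNoSchedule rollout feature listed above (#10223): while a MachineDeployment rolls out, Nodes backed by outdated Machines receive a soft taint so the scheduler prefers the replacement Nodes; running Pods are not evicted. A sketch of what such a Node roughly looks like (the Node name is a placeholder and the taint key is our reading of the change, so double-check it before relying on it):
+
+```yaml
+apiVersion: v1
+kind: Node
+metadata:
+  name: worker-old-revision  # placeholder Node name
+spec:
+  taints:
+  # a soft preference only; the taint is expected to go away once the rollout completes
+  - key: node.cluster.x-k8s.io/outdated-revision
+    effect: PreferNoSchedule
+```
+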
+ +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.7.0.md b/CHANGELOG/v1.7.0.md new file mode 100644 index 000000000000..8a52eed4d457 --- /dev/null +++ b/CHANGELOG/v1.7.0.md @@ -0,0 +1,429 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.26.x -> v1.29.x +- Workload Cluster: v1.24.x -> v1.29.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + + +## Highlights + +* MachinePool: promoted to beta and enabled by default (#10141) +* MachineDeployment: Taint nodes with PreferNoSchedule during rollouts (#10223) +* MachineHealthCheck: implement annotation to manually mark machines for remediation (#10202) +* KCP improvements: + * Enable kubeadm feature gates mutation (#10154) + * Defer remediation when a control plane machine is still provisioning (#9734) + * Prefer deletion of Machines with unhealthy control plane components during rollout (#10421) +* ClusterClass: + * Add variable metadata (#10308) + * Improved version validation if an update is already in progress (#10063) + * Improved unit and e2e test coverage for ClusterClass with MachinePools (#10028) +* Various improvements to our e2e tests to increase test coverage and reduce flakes to improve our CI signal +* Implemented prowjob-gen to make it easier to manage our ProwJobs (#9937) + +## Changes since v1.6.0 +## :chart_with_upwards_trend: Overview +- 339 new commits merged +- 5 breaking changes :warning: +- 12 feature additions ✨ +- 48 bugs fixed 🐛 + +## :memo: Proposals +- MachinePool: Update MachinePool Machines proposal with diagrams (#9664) + +## :warning: Breaking Changes +- Dependency: Bump to controller-runtime v0.17.0 & k8s.io/* v0.29 (#9964) +- Logging: Make event recorder names consistent for KCP and topology controller (#9862) +- MachinePool: Set MachinePool feature flag default to true + Beta (#10141) +- KCP/MachineSet: Objects generated by KCP, MachineSets and MachinePools will now consistently use machine name (#9833) + +## :sparkles: New Features +- API: Enable kubeadm feature gates mutation (#10154) +- clusterctl: Add k0smotron provider (#9991) +- clusterctl: Add public function to create new CRD migrator (#10075) +- Control-plane: Allow mutation of all fields that should be mutable (#9871) +- MachineDeployment: Add MachineSetReady condition to MachineDeployment (#9262) +- MachineDeployment: Taint nodes with PreferNoSchedule during rollouts (#10223) +- MachineHealthCheck: implement annotation to manually mark machines for remediation (#10202) +- Release: Add dependencies section to release notes tool (#10043) +- Release: Add release notes expander functionality (#10091) +- Release: Add utility to create git issues on provider repo (#9110) +- Testing: Resolve release markers (#9265) +- Testing: Prepare quickstart, capd and tests for the new release including kind bump (#9872) + +## :bug: Bug Fixes +- API: Use ptr instead of own implementation (#10276) +- API: Restore v1alpha3/v1alpha4 conversion to fix SSA issue & add e2e test coverage (#9939 #10147) (for more details see #10051) +- CABPK: Only refresh bootstrap token if needed, requeue in all cases where node hasn't joined yet (#9229) +- CAPD: Fix ignition to also set the kube-proxy configuration to skip setting sysctls (#9894) +- CAPD: Remove --enable-hostpath-provisioner flag (#10271) +- CAPD: Remove duplicate fix for btrfs/zfs support (#8376) +- ClusterCacheTracker: Fix ClusterCacheTracker memory leak (#9543) +- ClusterCacheTracker: Use RequeueAfter instead of immediate
requeue on ErrClusterLocked to not have exponentially increasing requeue time (#9810) +- ClusterClass: Fix for TestServerSideApplyWithDefaulting (#10307) +- ClusterClass: Improve handling of topology orphaned objects (#10277) +- clusterctl: Move handlePlugins function call out of init to allow debugging tests (#10200) +- clusterctl: Validate no objects exist from CRDs before deleting them (#9808) +- clusterctl: Verify that there is a release for the tag (#10423) +- ClusterResourceSet: Requeue after 1 minute if ErrClusterLocked got hit (#9777) +- Control-plane: KCP should defer remediation when a control plane machine is still provisioning (#9734) +- Devtools: Show default cluster-template (#9820) +- e2e: Add wait for MachineList to be available (#10301) +- e2e: fix unset GINKGO_FOCUS variable (#10085) +- e2e: Fix clusterctl upgrade e2e tests (enable CRS) (#10340) +- e2e: Fix kubetest to allow parallel execution on different clusters (#10426) +- e2e: Make MachinePools and MachineDeployments optional in ApplyClusterTemplateAndWait (#9960) +- e2e: Re-introduce exclude capi-webhook-system to fix test flake (#10157) +- e2e: Calculate correct worker count in clusterctl upgrade test (#9892) +- e2e: Fix finalizers test to not only rely on namespaced name (#9891) +- e2e: retry GetOwnerGraph in owner references test on certificate errors (#10201) +- IPAM: Fix webhooks using mixed api versions (#9861) +- KCP: Delete out of date machines with unhealthy control plane component conditions when rolling out KCP (#10421) +- KCP: Skip checking `clusterConfiguration.dns` fields when KCP checking MachineNeedRollout (#9857) +- Logging: Improve log k/v pairs and improve/drop a few log lines (#9813) +- Machine: Bubble up machine drain condition in `MachinesReadyCondition` (#9355) +- Machine: Watch external objects for machine before deleting (#10041) +- MachinePool: Fix TestReconcileMachinePoolScaleToFromZero flakes (#9745) +- MachineSet: Adjust deletion priority to avoid deleting too many machines (#10430) +- Release: Fix Community meeting area formatting in release notes (#9784) +- Release: Fix defaulting logic in release notes tool (#9958) +- Release: Fix documentation area comparison in release notes (#9769) +- Release: Fix kubeadm bootstrap prefix in release notes (#9814) +- Release: Fix wrong branch name display for weekly update script (#9918) +- Runtime SDK: Fix ClusterClass variables status & RuntimeExtension and add test coverage (#10337) +- Runtime SDK: Use keys/values structured log interface (#9998) +- Testing: Fix using correct testing.T when creating gomega object (#10342) +- Testing: Revert "Watch for Cluster resources in topology MD controller" (#9985) +- Testing: Reverting ginkgo.show-node-events to ginkgo.progress (#10214) +- Testing: fix flaky test TestPatch/Test patch with Machine (#9914) +- Testing: wait for topology to get rolled out before continuing with scaling checks (#9819) +- util: Add tests and update provider_issues.go (#10264) +- util: Checking cert's keypair for nil before accessing to avoid panics (#10368) +- util: recover gvk after scheme.Convert (#10409) + +## :seedling: Others +- API: Stop relying on GVK being set on regular typed objects (#9956) +- Bootstrap: Add MachinePool test for switching bootstrap config to another ready/non-ready object with different name (#9616) +- CABPK: Add pod metadata to capbk manager (#10208) +- CAPD: make docker machine bootstrap timeout configurable (#9952) +- CAPD: Remove requeues in DockerMachinePool (#9725) +- CAPD: Support configuring
ExtraPortMappings for the kind cluster (#10046) +- CAPIM: Add namespaces to the list of resources handled by the in-memory API server (#10297) +- CAPIM: Allow using different resource group and listener name with the in memory server (#10096) +- CAPIM: Make in memory API server more tolerant when starting (#10211) +- CAPIM: Make in memory runtime and server accessible from outside (#9986) +- CAPIM: use port only to identify the wcl to make port-forward… (#10245) +- CI: Add fail fast to DumpResourcesForCluster in case of no route to host (#10204) +- CI: Add more templating func to prowjob-gen (#10403) +- CI: Added go directive test (#10261) +- CI: Bump conversion-gen to v0.29.0 (#10012) +- CI: Bump go-apidiff to v0.8.2 (#10011) +- CI: Bump govulncheck to v1.0.4 (#10274) +- CI: Bump kpromo to v4.0.5 (#10140) +- CI: Bump kubebuilder envtest to 1.29.0 (#10013) +- CI: Change base branch for link checker (#10365) +- CI: DumpResourcesForCluster should fail fast for i/o errors (#10238) +- CI: Ensure build images contain the correct binary and are built for the correct architecture (#9932) +- CI: Fix Make target generate-go-openapi (#10161) +- CI: Fix TestPatchNode flake (#10287) +- CI: Hack/prowjob-gen skip creating empty files (#10022) +- CI: implement generator for prowjobs (#9937) +- CI: Makefile bump dependencies (#10236) +- CI: Bump WorkloadKubernetesVersion for v1.6 clusterctl upgrade test (#10017) +- CI: adjust capd dockerfile so the binary exposes the package in the built binaries path variable (#10030) +- CI: Update GH actions to work with new release-1.6 branch (#9708) +- ClusterClass: Add ClusterClass variables metadata (#10308) +- ClusterClass: Add unit tests for MachinePools for webhooks and cc controller (#10055) +- ClusterClass: Add unit tests for MachinePools in topology/scope package (#10052) +- ClusterClass: Implement topology validations for topology kubernetes version upgrades (#10063) +- ClusterClass: use the alias for ClusterCacheTrackerReader instead of the internal reference (#10309) +- clusterctl: Add 0 default to worker-machine-count help (#10203) +- clusterctl: Add completion for fish shell (#9950) +- clusterctl: Add hivelocity infra provider to clusterctl (#10168) +- clusterctl: Add in-cluster ipam provider (#8811) +- clusterctl: Add Proxmox provider quickstart (#9798) +- clusterctl: Better verbose logging on override path (#10180) +- clusterctl: Bump cert-manager to 1.14.2 (#10126) +- clusterctl: Bump cert-manager to 1.14.4 (#10267) +- clusterctl: Bump cert-manager to v1.14.1 (#10113) +- clusterctl: Clarify rules for adding new clusterctl default providers (#9975) +- clusterctl: Deprecate clusterctl alpha topology plan (#10139) +- clusterctl: Replace context.TODO() from clusterctl proxy.go (#9776) +- Community meeting: Move CecileRobertMichon to emeritus (#10042) +- Community meeting: Move ykakarap to emeritus approvers (#10000) +- Community meeting: Promote chrischdi to Cluster API maintainer (#9997) +- Dependency: Bump `github.com/docker/docker` from 24.0.7 -> 25.0.0 (#10057) +- Dependency: Bump `golangci-lint` to v1.56.1 (#10124) +- Dependency: Bump controller-tools to v0.14 (#9987) +- Dependency: Bump Go to 1.21.5 (#9900) +- Dependency: Bump go version to 1.21.8 (#10235) +- Dependency: Bump golang to v1.21.9 and golang.org/x/net to mitigate CVE-2023-45288 (#10378) +- Dependency: Bump kind version to v0.22.0 (#10094) +- Dependency: Bump to Go 1.20.12 (#9840) +- Dependency: Bump github.com/docker/docker from 25.0.4+incompatible to 25.0.5+incompatible (#10302) +- Devtools: Add missing
folder to clean-tilt make target (#9934) +- Devtools: Allow tilt provider with pre-built images (#10244) +- Devtools: Explicitly set golangci config for sub modules (#9821) +- Devtools: Fix variables names in Tiltfile (#9811) +- Devtools: Implement privileged namespace security policy update for tilt-prepare (#10178) +- Devtools: Simplify testing nightly builds with clusterctl (#10018) +- Devtools: Small improvements to tilt (#9936) +- e2e: Add conformance e2e tests (#10060) +- e2e: Add DeepCopy method for E2EConfig (#9988) +- e2e: Add PostCreateNamespace hook to E2E tests (#10067) +- e2e: Add test util to resolve Kubernetes versions (#9995) +- e2e: Allow to specify cluster name for E2E quick-start (#10088) +- e2e: Bump cluster-autoscaler to v1.29.0 (#9942) +- e2e: Drop duplicated scale test (#9968) +- e2e: Enable ability to test pre-releases of kubernetes (#10415) +- e2e: Fix ci-latest test to actually use ci latest (#10080) +- e2e: Fix CRS e2e helper with multiple bindings (#10191) +- e2e: Improve output of exec.KubectlApply (#9737) +- e2e: Make etcd and CoreDNS optional in upgrade and self-hosted tests (#9963) +- e2e: add WatchDaemonSetLogsByLabelSelector method (#9984) +- e2e: expose CopyAndAmendClusterctlConfig function (#10086) +- e2e: combine Finalizers tests to default quick start tests (#10039) +- e2e: Use --wait-providers in test framework InitWithBinary func (#10149) +- e2e: Use manager in test extension (#10097) +- Machine: Be more explicit when skipping node deletion (#10137) +- Machine: Mark Machine healthy condition as unknown if we can't list wl nodes (#9864) +- MachineDeployment: Set revision on machinedeployment and patch via defer (#10160) +- MachinePool: Use SSA Patch to create machines in MP controller (#9791) +- MachineSet: improve replica defaulting for autoscaler (#9649) +- MachineSet: KCP/MS remove hardcoded WithStepCounterIf(false) (#9772) +- Release: Add integration test for release notes tool (#9617) +- Release: Bump kubebuilder-release-tools to v0.4.3 (#9818) +- Release: Changelog entry to include cert-manager to v1.14.2 (#10188) +- Release: Determine release type from tag to also handle beta releases (#10324) +- Release: Fix dockerfile for clusterctl build (#10058) +- Release: Fix grammar in release script (#9981) +- Release: Improve release-staging build (#10095) +- Release: Improve weekly PR update generation script and documentation (#10092) +- Release: Make release notes tool not dependent on local git (#9618) +- Release: Prep main branch for v1.7 development (#9799) +- Release: Provide faster support for new k8s minor releases (#9971) +- Release: Read in dockerfiles from stdin (#9990) +- Release: also detect alpha releases as pre-releases (#10379) +- Release: Swap in new 1.7 RT members in OWNERS (#9855) +- Runtime SDK: Add more helper functions in topologymutation variable.go to help unmarshal variables (#9670) +- Runtime SDK: Enable integration tests of RuntimeExtensions (#10330) +- Testing: Add scale testing for upgrades (#9077) +- Testing: Drop policy v1beta1 API support in the testing framework (#10158) +- Testing: Drop unused runtime features for the in-memory provider (#9778) +- Testing: Fix typo in test framework (#9873) +- Testing: Fixing kubetest warnings for deprecations (#10172) +- Testing: Print conformance image used in kubetest (#10076) +- Testing: Remove k8s.io/utils/pointer from repository (#9836) +- Testing: Small improvements to the inmemory api server (#9935) +- Testing: add dynamic finalizer assertions for e2e framework (#9970) +- 
Testing: Watch for Cluster resources in topology MachineSet & MachineDeployment controllers (#10029) +- Testing: Watch for Cluster resources in topology MD controller (#9865) +- util: Improve patch helper error handling (#9946) +- util: Use min/max funcs from Go SDK (#9945) + +:book: Additionally, there have been 53 contributions to our documentation and book. (#10005, #10031, #10040, #10061, #10066, #10068, #10084, #10099, #10100, #10115, #10122, #10170, #10174, #10194, #10239, #10257, #10268, #10288, #10289, #10323, #10329, #10334, #10381, #10393, #10410, #9585, #9640, #9767, #9771, #9779, #9782, #9786, #9794, #9797, #9801, #9817, #9829, #9831, #9838, #9856, #9866, #9867, #9868, #9876, #9896, #9897, #9908, #9941, #9949, #9957, #9961, #9972, #9993) + +## Dependencies (main go module) + +### Added +- github.com/matttproud/golang_protobuf_extensions/v2: [v2.0.0](https://github.com/matttproud/golang_protobuf_extensions/tree/v2.0.0) + +### Changed +- cloud.google.com/go/accessapproval: v1.7.1 → v1.7.4 +- cloud.google.com/go/accesscontextmanager: v1.8.1 → v1.8.4 +- cloud.google.com/go/aiplatform: v1.48.0 → v1.52.0 +- cloud.google.com/go/analytics: v0.21.3 → v0.21.6 +- cloud.google.com/go/apigateway: v1.6.1 → v1.6.4 +- cloud.google.com/go/apigeeconnect: v1.6.1 → v1.6.4 +- cloud.google.com/go/apigeeregistry: v0.7.1 → v0.8.2 +- cloud.google.com/go/appengine: v1.8.1 → v1.8.4 +- cloud.google.com/go/area120: v0.8.1 → v0.8.4 +- cloud.google.com/go/artifactregistry: v1.14.1 → v1.14.6 +- cloud.google.com/go/asset: v1.14.1 → v1.15.3 +- cloud.google.com/go/assuredworkloads: v1.11.1 → v1.11.4 +- cloud.google.com/go/automl: v1.13.1 → v1.13.4 +- cloud.google.com/go/baremetalsolution: v1.1.1 → v1.2.3 +- cloud.google.com/go/batch: v1.3.1 → v1.6.3 +- cloud.google.com/go/beyondcorp: v1.0.0 → v1.0.3 +- cloud.google.com/go/bigquery: v1.53.0 → v1.57.1 +- cloud.google.com/go/billing: v1.16.0 → v1.17.4 +- cloud.google.com/go/binaryauthorization: v1.6.1 → v1.7.3 +- cloud.google.com/go/certificatemanager: v1.7.1 → v1.7.4 +- cloud.google.com/go/channel: v1.16.0 → v1.17.3 +- cloud.google.com/go/cloudbuild: v1.13.0 → v1.14.3 +- cloud.google.com/go/clouddms: v1.6.1 → v1.7.3 +- cloud.google.com/go/cloudtasks: v1.12.1 → v1.12.4 +- cloud.google.com/go/compute: v1.23.0 → v1.23.3 +- cloud.google.com/go/contactcenterinsights: v1.10.0 → v1.11.3 +- cloud.google.com/go/container: v1.24.0 → v1.27.1 +- cloud.google.com/go/containeranalysis: v0.10.1 → v0.11.3 +- cloud.google.com/go/datacatalog: v1.16.0 → v1.18.3 +- cloud.google.com/go/dataflow: v0.9.1 → v0.9.4 +- cloud.google.com/go/dataform: v0.8.1 → v0.9.1 +- cloud.google.com/go/datafusion: v1.7.1 → v1.7.4 +- cloud.google.com/go/datalabeling: v0.8.1 → v0.8.4 +- cloud.google.com/go/dataplex: v1.9.0 → v1.11.1 +- cloud.google.com/go/dataproc/v2: v2.0.1 → v2.2.3 +- cloud.google.com/go/dataqna: v0.8.1 → v0.8.4 +- cloud.google.com/go/datastore: v1.13.0 → v1.15.0 +- cloud.google.com/go/datastream: v1.10.0 → v1.10.3 +- cloud.google.com/go/deploy: v1.13.0 → v1.14.2 +- cloud.google.com/go/dialogflow: v1.40.0 → v1.44.3 +- cloud.google.com/go/dlp: v1.10.1 → v1.11.1 +- cloud.google.com/go/documentai: v1.22.0 → v1.23.5 +- cloud.google.com/go/domains: v0.9.1 → v0.9.4 +- cloud.google.com/go/edgecontainer: v1.1.1 → v1.1.4 +- cloud.google.com/go/essentialcontacts: v1.6.2 → v1.6.5 +- cloud.google.com/go/eventarc: v1.13.0 → v1.13.3 +- cloud.google.com/go/filestore: v1.7.1 → v1.7.4 +- cloud.google.com/go/firestore: v1.13.0 → v1.14.0 +- cloud.google.com/go/functions: v1.15.1 → v1.15.4 +- 
cloud.google.com/go/gkebackup: v1.3.0 → v1.3.4 +- cloud.google.com/go/gkeconnect: v0.8.1 → v0.8.4 +- cloud.google.com/go/gkehub: v0.14.1 → v0.14.4 +- cloud.google.com/go/gkemulticloud: v1.0.0 → v1.0.3 +- cloud.google.com/go/gsuiteaddons: v1.6.1 → v1.6.4 +- cloud.google.com/go/iam: v1.1.1 → v1.1.5 +- cloud.google.com/go/iap: v1.8.1 → v1.9.3 +- cloud.google.com/go/ids: v1.4.1 → v1.4.4 +- cloud.google.com/go/iot: v1.7.1 → v1.7.4 +- cloud.google.com/go/kms: v1.15.0 → v1.15.5 +- cloud.google.com/go/language: v1.10.1 → v1.12.2 +- cloud.google.com/go/lifesciences: v0.9.1 → v0.9.4 +- cloud.google.com/go/logging: v1.7.0 → v1.8.1 +- cloud.google.com/go/longrunning: v0.5.1 → v0.5.4 +- cloud.google.com/go/managedidentities: v1.6.1 → v1.6.4 +- cloud.google.com/go/maps: v1.4.0 → v1.6.1 +- cloud.google.com/go/mediatranslation: v0.8.1 → v0.8.4 +- cloud.google.com/go/memcache: v1.10.1 → v1.10.4 +- cloud.google.com/go/metastore: v1.12.0 → v1.13.3 +- cloud.google.com/go/monitoring: v1.15.1 → v1.16.3 +- cloud.google.com/go/networkconnectivity: v1.12.1 → v1.14.3 +- cloud.google.com/go/networkmanagement: v1.8.0 → v1.9.3 +- cloud.google.com/go/networksecurity: v0.9.1 → v0.9.4 +- cloud.google.com/go/notebooks: v1.9.1 → v1.11.2 +- cloud.google.com/go/optimization: v1.4.1 → v1.6.2 +- cloud.google.com/go/orchestration: v1.8.1 → v1.8.4 +- cloud.google.com/go/orgpolicy: v1.11.1 → v1.11.4 +- cloud.google.com/go/osconfig: v1.12.1 → v1.12.4 +- cloud.google.com/go/oslogin: v1.10.1 → v1.12.2 +- cloud.google.com/go/phishingprotection: v0.8.1 → v0.8.4 +- cloud.google.com/go/policytroubleshooter: v1.8.0 → v1.10.2 +- cloud.google.com/go/privatecatalog: v0.9.1 → v0.9.4 +- cloud.google.com/go/recaptchaenterprise/v2: v2.7.2 → v2.8.3 +- cloud.google.com/go/recommendationengine: v0.8.1 → v0.8.4 +- cloud.google.com/go/recommender: v1.10.1 → v1.11.3 +- cloud.google.com/go/redis: v1.13.1 → v1.14.1 +- cloud.google.com/go/resourcemanager: v1.9.1 → v1.9.4 +- cloud.google.com/go/resourcesettings: v1.6.1 → v1.6.4 +- cloud.google.com/go/retail: v1.14.1 → v1.14.4 +- cloud.google.com/go/run: v1.2.0 → v1.3.3 +- cloud.google.com/go/scheduler: v1.10.1 → v1.10.4 +- cloud.google.com/go/secretmanager: v1.11.1 → v1.11.4 +- cloud.google.com/go/security: v1.15.1 → v1.15.4 +- cloud.google.com/go/securitycenter: v1.23.0 → v1.24.2 +- cloud.google.com/go/servicedirectory: v1.11.0 → v1.11.3 +- cloud.google.com/go/shell: v1.7.1 → v1.7.4 +- cloud.google.com/go/spanner: v1.47.0 → v1.51.0 +- cloud.google.com/go/speech: v1.19.0 → v1.20.1 +- cloud.google.com/go/storage: v1.14.0 → v1.35.1 +- cloud.google.com/go/storagetransfer: v1.10.0 → v1.10.3 +- cloud.google.com/go/talent: v1.6.2 → v1.6.5 +- cloud.google.com/go/texttospeech: v1.7.1 → v1.7.4 +- cloud.google.com/go/tpu: v1.6.1 → v1.6.4 +- cloud.google.com/go/trace: v1.10.1 → v1.10.4 +- cloud.google.com/go/translate: v1.8.2 → v1.9.3 +- cloud.google.com/go/video: v1.19.0 → v1.20.3 +- cloud.google.com/go/videointelligence: v1.11.1 → v1.11.4 +- cloud.google.com/go/vision/v2: v2.7.2 → v2.7.5 +- cloud.google.com/go/vmmigration: v1.7.1 → v1.7.4 +- cloud.google.com/go/vmwareengine: v1.0.0 → v1.0.3 +- cloud.google.com/go/vpcaccess: v1.7.1 → v1.7.4 +- cloud.google.com/go/webrisk: v1.9.1 → v1.9.4 +- cloud.google.com/go/websecurityscanner: v1.6.1 → v1.6.4 +- cloud.google.com/go/workflows: v1.11.1 → v1.12.3 +- cloud.google.com/go: v0.110.7 → v0.110.10 +- github.com/cloudflare/circl: [v1.3.3 → v1.3.7](https://github.com/cloudflare/circl/compare/v1.3.3...v1.3.7) +- github.com/evanphx/json-patch/v5: [v5.7.0 → 
v5.9.0](https://github.com/evanphx/json-patch/compare/v5.7.0...v5.9.0) +- github.com/evanphx/json-patch: [v5.6.0+incompatible → v5.7.0+incompatible](https://github.com/evanphx/json-patch/compare/v5.6.0...v5.7.0) +- github.com/frankban/quicktest: [v1.14.4 → v1.14.6](https://github.com/frankban/quicktest/compare/v1.14.4...v1.14.6) +- github.com/fsnotify/fsnotify: [v1.6.0 → v1.7.0](https://github.com/fsnotify/fsnotify/compare/v1.6.0...v1.7.0) +- github.com/go-logr/logr: [v1.3.0 → v1.4.1](https://github.com/go-logr/logr/compare/v1.3.0...v1.4.1) +- github.com/go-logr/zapr: [v1.2.4 → v1.3.0](https://github.com/go-logr/zapr/compare/v1.2.4...v1.3.0) +- github.com/golang/mock: [v1.4.4 → v1.4.0](https://github.com/golang/mock/compare/v1.4.4...v1.4.0) +- github.com/golang/protobuf: [v1.5.3 → v1.5.4](https://github.com/golang/protobuf/compare/v1.5.3...v1.5.4) +- github.com/google/cel-go: [v0.16.1 → v0.17.7](https://github.com/google/cel-go/compare/v0.16.1...v0.17.7) +- github.com/google/uuid: [v1.3.1 → v1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) +- github.com/googleapis/enterprise-certificate-proxy: [v0.3.1 → v0.3.2](https://github.com/googleapis/enterprise-certificate-proxy/compare/v0.3.1...v0.3.2) +- github.com/googleapis/google-cloud-go-testing: [bcd43fb → 1c9a4c6](https://github.com/googleapis/google-cloud-go-testing/compare/bcd43fb...1c9a4c6) +- github.com/gorilla/websocket: [v1.4.2 → v1.5.0](https://github.com/gorilla/websocket/compare/v1.4.2...v1.5.0) +- github.com/nats-io/nats.go: [v1.30.2 → v1.31.0](https://github.com/nats-io/nats.go/compare/v1.30.2...v1.31.0) +- github.com/nats-io/nkeys: [v0.4.5 → v0.4.6](https://github.com/nats-io/nkeys/compare/v0.4.5...v0.4.6) +- github.com/onsi/ginkgo/v2: [v2.13.1 → v2.17.1](https://github.com/onsi/ginkgo/compare/v2.13.1...v2.17.1) +- github.com/onsi/gomega: [v1.30.0 → v1.32.0](https://github.com/onsi/gomega/compare/v1.30.0...v1.32.0) +- github.com/pkg/sftp: [v1.13.1 → v1.13.6](https://github.com/pkg/sftp/compare/v1.13.1...v1.13.6) +- github.com/prometheus/client_golang: [v1.17.0 → v1.18.0](https://github.com/prometheus/client_golang/compare/v1.17.0...v1.18.0) +- github.com/prometheus/client_model: [9a2bf30 → v0.5.0](https://github.com/prometheus/client_model/compare/9a2bf30...v0.5.0) +- github.com/prometheus/common: [v0.44.0 → v0.45.0](https://github.com/prometheus/common/compare/v0.44.0...v0.45.0) +- github.com/prometheus/procfs: [v0.11.1 → v0.12.0](https://github.com/prometheus/procfs/compare/v0.11.1...v0.12.0) +- github.com/sagikazarmark/crypt: [v0.15.0 → v0.17.0](https://github.com/sagikazarmark/crypt/compare/v0.15.0...v0.17.0) +- github.com/sagikazarmark/locafero: [v0.3.0 → v0.4.0](https://github.com/sagikazarmark/locafero/compare/v0.3.0...v0.4.0) +- github.com/spf13/afero: [v1.10.0 → v1.11.0](https://github.com/spf13/afero/compare/v1.10.0...v1.11.0) +- github.com/spf13/cast: [v1.5.1 → v1.6.0](https://github.com/spf13/cast/compare/v1.5.1...v1.6.0) +- github.com/spf13/viper: [v1.17.0 → v1.18.2](https://github.com/spf13/viper/compare/v1.17.0...v1.18.2) +- go.etcd.io/bbolt: v1.3.7 → v1.3.8 +- go.etcd.io/etcd/api/v3: v3.5.10 → v3.5.13 +- go.etcd.io/etcd/client/pkg/v3: v3.5.10 → v3.5.13 +- go.etcd.io/etcd/client/v2: v2.305.9 → v2.305.10 +- go.etcd.io/etcd/client/v3: v3.5.10 → v3.5.13 +- go.etcd.io/etcd/pkg/v3: v3.5.9 → v3.5.10 +- go.etcd.io/etcd/raft/v3: v3.5.9 → v3.5.10 +- go.etcd.io/etcd/server/v3: v3.5.9 → v3.5.10 +- go.uber.org/zap: v1.25.0 → v1.26.0 +- golang.org/x/crypto: v0.15.0 → v0.21.0 +- golang.org/x/lint: 83fdc39 → 910be7a 
+- golang.org/x/mod: v0.13.0 → v0.14.0 +- golang.org/x/net: v0.18.0 → v0.23.0 +- golang.org/x/oauth2: v0.14.0 → v0.18.0 +- golang.org/x/sync: v0.4.0 → v0.6.0 +- golang.org/x/sys: v0.14.0 → v0.18.0 +- golang.org/x/term: v0.14.0 → v0.18.0 +- golang.org/x/time: v0.3.0 → v0.5.0 +- golang.org/x/tools: v0.14.0 → v0.17.0 +- google.golang.org/api: v0.143.0 → v0.153.0 +- google.golang.org/genproto/googleapis/api: 007df8e → bbf56f3 +- google.golang.org/genproto/googleapis/rpc: e6e6cda → 83a465c +- google.golang.org/genproto: 007df8e → bbf56f3 +- google.golang.org/protobuf: v1.31.0 → v1.33.0 +- honnef.co/go/tools: v0.0.1-2020.1.4 → v0.0.1-2019.2.3 +- k8s.io/api: v0.28.4 → v0.29.3 +- k8s.io/apiextensions-apiserver: v0.28.4 → v0.29.3 +- k8s.io/apimachinery: v0.28.4 → v0.29.3 +- k8s.io/apiserver: v0.28.4 → v0.29.3 +- k8s.io/cli-runtime: v0.28.4 → v0.29.3 +- k8s.io/client-go: v0.28.4 → v0.29.3 +- k8s.io/cluster-bootstrap: v0.28.4 → v0.29.3 +- k8s.io/code-generator: v0.28.4 → v0.29.3 +- k8s.io/component-base: v0.28.4 → v0.29.3 +- k8s.io/component-helpers: v0.28.4 → v0.29.3 +- k8s.io/gengo: c0856e2 → 9cce18d +- k8s.io/klog/v2: v2.100.1 → v2.110.1 +- k8s.io/kms: v0.28.4 → v0.29.3 +- k8s.io/kube-openapi: 2695361 → 2dd684a +- k8s.io/kubectl: v0.28.4 → v0.29.3 +- k8s.io/metrics: v0.28.4 → v0.29.3 +- k8s.io/utils: d93618c → b307cd5 +- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.1.2 → v0.28.0 +- sigs.k8s.io/controller-runtime: v0.16.3 → v0.17.3 +- sigs.k8s.io/structured-merge-diff/v4: v4.2.3 → v4.4.1 + +### Removed +- github.com/benbjohnson/clock: [v1.3.0](https://github.com/benbjohnson/clock/tree/v1.3.0) +- github.com/docker/distribution: [v2.8.3+incompatible](https://github.com/docker/distribution/tree/v2.8.3) +- github.com/google/martian/v3: [v3.1.0](https://github.com/google/martian/tree/v3.1.0) +- github.com/minio/highwayhash: [v1.0.2](https://github.com/minio/highwayhash/tree/v1.0.2) +- github.com/nats-io/jwt/v2: [v2.4.1](https://github.com/nats-io/jwt/tree/v2.4.1) +- go.opentelemetry.io/otel/exporters/otlp/internal/retry: v1.10.0 + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.7.1.md b/CHANGELOG/v1.7.1.md new file mode 100644 index 000000000000..33d1aef172e8 --- /dev/null +++ b/CHANGELOG/v1.7.1.md @@ -0,0 +1,39 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.26.x -> v1.30.x +- Workload Cluster: v1.24.x -> v1.30.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Highlights + +* Kubernetes v1.30 is now supported + +## Changes since v1.7.0 +## :chart_with_upwards_trend: Overview +- 11 new commits merged +- 2 bugs fixed 🐛 + +## :bug: Bug Fixes +- CAPD: Verify lb config after writing it (#10461) +- e2e: also gather junit reports in case of errors observed from ginkgo (#10494) + +## :seedling: Others +- Dependency: Bump envtest to v1.30.0 (#10481) +- e2e: Export more func in test/e2e/common.go (#10447) +- Testing: Bump Kubernetes in tests to v1.30.0 and claim support for v1.30 (#10465) + +:book: Additionally, there have been 6 contributions to our documentation and book. 
(#10446, #10448, #10451, #10456, #10470, #10491) + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +_Nothing has changed._ + +### Removed +_Nothing has changed._ + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.7.2.md b/CHANGELOG/v1.7.2.md new file mode 100644 index 000000000000..c2bd8e6ae3cc --- /dev/null +++ b/CHANGELOG/v1.7.2.md @@ -0,0 +1,40 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.26.x -> v1.30.x +- Workload Cluster: v1.24.x -> v1.30.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.7.1 +## :chart_with_upwards_trend: Overview +- 12 new commits merged +- 3 bugs fixed 🐛 + +## :bug: Bug Fixes +- clusterctl: Ensure cert-manager objects get applied before other provider objects (#10503) +- e2e: Test: Ensure all ownerRef assertions for some Kind are evaluated (#10592) +- e2e: Test: filter cluster-wide objects asserted in ResourceVersion tests to exclude objects of parallel tests (#10570) + +## :seedling: Others +- API: Allow users to specify webhook server cert and key names (#10581) +- clusterctl: Add Tinkerbell to the providers list (#10516) +- clusterctl: Bump cert-manager to 1.14.5 (#10517) +- clusterctl: Clusterctl/client/cert_manager: improve shouldUpgrade (#10497) +- clusterctl: Add support for the linode-linode infrastructure provider to clusterctl (#10512) +- e2e: Ensure resourceVersions are stable (#10548) +- KCP/MachineSet: Flag for old infra machine naming (#10587) + +:book: Additionally, there have been 2 contributions to our documentation and book. (#10535, #10536) + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +_Nothing has changed._ + +### Removed +_Nothing has changed._ + +_Thanks to all our contributors!_ 😊 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7f8b6c02c5e8..4b493aa87122 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -11,17 +11,20 @@ - [CLIs](#clis) - [Branches](#branches) - [Support and guarantees](#support-and-guarantees) + - [Removal of v1alpha3 & v1alpha4 apiVersions](#removal-of-v1alpha3--v1alpha4-apiversions) - [Contributing a Patch](#contributing-a-patch) - [Documentation changes](#documentation-changes) - [Releases](#releases) - [Proposal process (CAEP)](#proposal-process-caep) +- [Triaging issues](#triaging-issues) - [Triaging E2E test failures](#triaging-e2e-test-failures) - [Reviewing a Patch](#reviewing-a-patch) -- [Reviews](#reviews) + - [Reviews](#reviews) - [Approvals](#approvals) - [Features and bugs](#features-and-bugs) - [Experiments](#experiments) - [Breaking Changes](#breaking-changes) +- [Dependency Licence Management](#dependency-licence-management) - [API conventions](#api-conventions) - [Optional vs. Required](#optional-vs-required) - [Example](#example) @@ -35,6 +38,8 @@ Read the following guide if you're interested in contributing to cluster-api. +Contributors who are not used to working in the Kubernetes ecosystem should also take a look at the Kubernetes [New Contributor Course](https://www.kubernetes.dev/docs/onboarding/). + ## Contributor License Agreements We'd love to accept your patches! Before we can take them, we have to jump a couple of legal hurdles. 
@@ -92,14 +97,24 @@ We generally allow backports of following changes to all supported branches: - Dependency bumps for CVE (usually limited to CVE resolution; backports of non-CVE related version bumps are considered exceptions to be evaluated case by case) - Cert-manager version bumps (to avoid having releases with cert-manager versions that are out of support, when possible) - Changes required to support new Kubernetes versions, when possible. See [supported Kubernetes versions](https://cluster-api.sigs.k8s.io/reference/versions.html#supported-kubernetes-versions) for more details. -- Changes to use the latest Go patch release. If the Go minor version of a supported branch goes out of support, we will consider on a case-by-case basis - to bump to a newer Go minor version (e.g. to pick up CVE fixes). This could have impact on everyone importing Cluster API. +- Changes to use the latest Go patch version to build controller images. +- Changes to bump the Go minor version used to build controller images, if the Go minor version of a supported branch goes out of support (e.g. to pick up bug and CVE fixes). + This has no impact on folks importing Cluster API as we won't modify the version in `go.mod` and the version in the `Makefile` does not affect them. We generally allow backports of following changes only to the latest supported branch: - Improvements to existing docs (the latest supported branch hosts the current version of the book) - Improvements to CI signal - Improvements to the test framework +While we recommend targeting the following types of changes to the next minor release, CAPI maintainers will also consider +exceptions for backporting the following changes only to the latest supported branch: +- Enhancements or additions to experimental Cluster API features, with the goal of allowing faster adoption and iteration. + Please note that stability of the branch will always be the top priority while evaluating those PRs, and thus approval + requires /lgtm from at least two maintainers who, on top of checking that the backport is not introducing any breaking + change for either API or behavior, will evaluate whether the impact of those backports is limited and well-scoped, e.g. + by checking that the changes do not touch non-experimental code paths like utils and/or by applying other + considerations depending on the specific PR. + Like any other activity in the project, backporting a fix/change is a community-driven effort and requires that someone volunteers to own the task. In most cases, the cherry-pick bot can (and should) be used to automate opening a cherry-pick PR. @@ -142,22 +157,25 @@ Cluster API maintains the most recent release/releases for all supported API and | API Version | Supported Until | |--------------|-----------------------------------------------------------------------------------------| | **v1beta1** | TBD (current stable) | -| **v1alpha4** | EOL since 2022-04-06 ([apiVersion removal](#removal-of-v1alpha3--v1alpha4-apiversions)) | -| **v1alpha3** | EOL since 2022-02-23 ([apiVersion removal](#removal-of-v1alpha3--v1alpha4-apiversions)) | - For the current stable API version (v1beta1) we support the two most recent minor releases; older minor releases are immediately unsupported when a new major/minor release is available. - For older API versions we only support the most recent minor release until the API version reaches EOL. 
- We will maintain test coverage for all supported minor releases and for one additional release for the current stable API version in case we have to do an emergency patch release. - For example, if v1.2 and v1.3 are currently supported, we will also maintain test coverage for v1.1 for one additional release cycle. When v1.4 is released, tests for v1.1 will be removed. - -| Minor Release | API Version | Supported Until | -|---------------|--------------|------------------------------------------------------| -| v1.3.x | **v1beta1** | when v1.5.0 will be released | -| v1.2.x | **v1beta1** | when v1.4.0 will be released, tentatively March 2023 | -| v1.1.x | **v1beta1** | EOL since 2022-07-18 - v1.2.0 release date (*) | -| v1.0.x | **v1beta1** | EOL since 2022-02-02 - v1.1.0 release date (*) | -| v0.4.x | **v1alpha4** | EOL since 2022-04-06 - API version EOL | -| v0.3.x | **v1alpha3** | EOL since 2022-02-23 - API version EOL | + For example, if v1.6 and v1.7 are currently supported, we will also maintain test coverage for v1.5 for one additional release cycle. When v1.8 is released, tests for v1.5 will be removed. + +| Minor Release | API Version | Supported Until | +|---------------|--------------|------------------------------------------------| +| v1.8.x | **v1beta1** | when v1.10.0 will be released | +| v1.7.x | **v1beta1** | when v1.9.0 will be released | +| v1.6.x | **v1beta1** | when v1.8.0 will be released | +| v1.5.x | **v1beta1** | EOL since 2024-04-16 - v1.7.0 release date | +| v1.4.x | **v1beta1** | EOL since 2023-12-05 - v1.6.0 release date | +| v1.3.x | **v1beta1** | EOL since 2023-07-25 - v1.5.0 release date | +| v1.2.x | **v1beta1** | EOL since 2023-03-28 - v1.4.0 release date | +| v1.1.x | **v1beta1** | EOL since 2022-07-18 - v1.2.0 release date (*) | +| v1.0.x | **v1beta1** | EOL since 2022-02-02 - v1.1.0 release date (*) | +| v0.4.x | **v1alpha4** | EOL since 2022-04-06 - API version EOL | +| v0.3.x | **v1alpha3** | EOL since 2022-02-23 - API version EOL | (*) Previous support policy applies, older minor releases were immediately unsupported when a new major/minor release was available @@ -165,15 +183,15 @@ Cluster API maintains the most recent release/releases for all supported API and ### Removal of v1alpha3 & v1alpha4 apiVersions -We are going to remove the apiVersions in upcoming releases: -* v1.5: - * Kubernetes API server will stop serving the v1alpha3 apiVersion -* v1.6: - * v1alpha3 apiVersion will be removed from the CRDs - * Kubernetes API server will stop serving the v1alpha4 apiVersion -* v1.7 - * v1alpha4 apiVersion will be removed from the CRDs -For more details and latest information please see the following issue: [Removing v1alpha3 & v1alpha4 apiVersions](https://github.com/kubernetes-sigs/cluster-api/issues/8038). +Cluster API stopped serving the v1alpha3 API types starting from the v1.5 release and the v1alpha4 types starting from the v1.6 release. +Those types still exist in Cluster API while we work toward a fix (or a workaround) for https://github.com/kubernetes-sigs/cluster-api/issues/10051. +IMPORTANT! v1alpha3 and v1alpha4 types only exist for conversion and cannot be used by clients anymore. + +Note: Removal of a deprecated APIVersion in Kubernetes [can cause issues with garbage collection by the kube-controller-manager](https://github.com/kubernetes/kubernetes/issues/102641). +This means that some objects which rely on garbage collection for cleanup - e.g. 
MachineSets and their descendant objects, like Machines and InfrastructureMachines, may not be cleaned up properly if those +objects were created with an APIVersion which is no longer served. +To avoid these issues it's advised to restart the kube-controller-manager after upgrading to a version of Cluster API which drops support for an APIVersion - e.g. v1.5 and v1.6. +This can be accomplished with any Kubernetes control-plane rollout, including a Kubernetes version upgrade, or by manually stopping and restarting the kube-controller-manager. ## Contributing a Patch @@ -227,30 +245,7 @@ When submitting the PR remember to label it with the 📖 (:book:) icon. ## Releases -- Minor versions CAN be planned and scheduled for each quarter, or sooner if necessary. - - Each minor version is preceded with one or more planning session. - - Planning consists of one or more backlog grooming meetings, roadmap amendments, - and CAEP proposal reviews. - - Cluster API uses [GitHub milestones](https://github.com/kubernetes-sigs/cluster-api/milestones) to track work - for minor releases. - - Adding an issue to a milestone provides forward visibility on what the next release will be, so, as soon as there - is the intent to work on an issue for a specific target release, contributors are expected to work with maintainers to - set the milestone on the issue so it will be tracked for the release (note: only major features/bug fixes specifically - targeting a release must be tracked; everything else will simply merge when ready without additional toil). - - Before adding an issue to a release milestone, maintainers must ensure that the issue have been triaged and - there is an assignee who expressed the intent to complete the work before the release date. - - An issue being in the milestone doesn't guarantee inclusion in the release; this depends on the work being - completed before the release code freeze target date. - - Code freeze is in effect at least 72 hours (3 days) before a major/minor release. - - Maintainers should communicate the code freeze date at a community meeting preceding the code freeze date. - - Only critical bug fixes may be merged in between freeze & release. - - Each bug MUST be associated with an open issue and properly triaged. - - PRs MUST be approved by at least 2 project maintainers. - - First approver should `/approve` and `/hold`. - - Second approver should `/approve` and `/hold cancel`. - - [E2E Test grid](https://testgrid.k8s.io/sig-cluster-lifecycle-cluster-api#capi%20e2e%20tests) SHOULD be green before cutting a release. -- Patch versions CAN be planned and scheduled each month for supported minor releases. -- Dates in a release are approximations and always subject to change. +The Cluster API release process is described in [this document](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-cycle.md). ## Proposal process (CAEP) @@ -267,6 +262,62 @@ The [template](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/pro - A proposal in a Google Doc MUST turn into a [Pull Request](https://github.com/kubernetes-sigs/cluster-api/pulls). - Proposals MUST be merged and in `implementable` state to be considered part of a major or minor release. +## Triaging issues + +Issue triage in Cluster API follows the best practices of the Kubernetes project while seeking balance with 
+the different size of this project. + +While the maintainers play an important role in the triage process described below, the help of the community is crucial +to ensure that this task is performed in a timely manner and is sustainable long term. + +| Phase | Responsible | What is required to move forward | +|---------------------|-------------|--------------------------------------------------------------------------------------------------------------------------------------------------| +| Initial triage | Maintainers | The issue MUST have:<br/>- [priority/*](https://github.com/kubernetes-sigs/cluster-api/labels?q=priority) label<br/>- [kind/*](https://github.com/kubernetes-sigs/cluster-api/labels?q=kind) label | +| Triage finalization | Everyone | There should be consensus on the way forward and enough details for the issue being actionable | +| Triage finalization | Maintainers | The issue MUST have:<br/>- `triage/accepted` label, plus optionally the `help` or `good-first-issue` label | +| Actionable | Everyone | Contributors volunteering time to do the work and reviewers/approvers bandwidth<br/>
The issue being fixed | +Please note that: + +- Priority provides an indication to everyone looking at issues. + - When assigning priority, several factors are taken into consideration, including impact on users, relevance + for the upcoming releases, and maturity of the issue (consensus + completeness). + - `priority/awaiting-more-evidence` is used to mark issues where there is not enough info to decide on + one of the other [priority values](https://github.com/kubernetes-sigs/cluster-api/labels?q=priority). + - Priority can change over time, and everyone is welcome to provide constructive feedback about updating an issue's priority. + - Applying a priority label is not a commitment to execute within a certain time frame, because implementation + depends on contributors volunteering time to do the work and on reviewers/approvers bandwidth. + +- Closing inactive issues which are stuck in the "triage" phases is a crucial task for maintaining an + actionable backlog. Accordingly, the following automation applies to issues in the "triage" or the "refinement" phase: + - After 90 days of inactivity, issues will be marked with the `lifecycle/stale` label + - After 30 days of inactivity from when `lifecycle/stale` was applied, issues will be marked with the `lifecycle/rotten` label + - After 30 days of inactivity from when `lifecycle/rotten` was applied, issues will be closed. + In this regard, it is important to note that closed issues are and will always be a highly valuable part of the + knowledge base about the Cluster API project, and they will never go away. + - Note: + - The automation above does not apply to issues triaged as `priority/critical-urgent`, `priority/important-soon` or `priority/important-longterm` + - Maintainers can apply the `lifecycle/frozen` label if they want to exclude an issue from the automation above + - Issues excluded from the automation above will be re-triaged periodically + +- If you really care about an issue stuck in the "triage" phases, you can engage with the community or + try to figure out what is holding back the issue by yourself, e.g.: + - Issue too generic or not yet actionable + - Lack of consensus or the issue is not relevant for other contributors + - Lack of contributors; in this case, finding ways to help and free up maintainers'/other contributors' time from other tasks + can really help to unblock your issues. + +- Issues in the "actionable" state are not subject to the stale/rotten/closed process; however, it is required to re-assess + them periodically given that the project changes quickly. Accordingly, the following automation applies to issues + in the "actionable" phase: + - After 30 days of inactivity, the `triage/accepted` label will be removed from issues with `priority/critical-urgent` + - After 90 days of inactivity, the `triage/accepted` label will be removed from issues with `priority/important-soon` + - After 1 year of inactivity, the `triage/accepted` label will be removed from issues without `priority/critical-urgent` or `priority/important-soon` + +- If you really care about an issue stuck in the "actionable" phase, you can try to figure out what is holding back + the issue implementation (usually lack of contributors), engage with the community, find ways to help and free up + maintainers'/other contributors' time from other tasks, or `/assign` the issue and send a PR. 
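In practice, the labels above are applied through Prow comment commands on the issue itself. The following is a minimal sketch of a typical triage exchange, assuming the standard Kubernetes Prow commands configured for kubernetes-sigs repositories (the specific kind/area/priority values are illustrative):

```
# Initial triage by a maintainer: classify the issue.
/kind bug
/area clusterctl
/priority important-soon

# Triage finalization: mark the issue actionable and, optionally, easy to pick up.
/triage accepted
/help

# Keep a long-running issue out of the stale/rotten automation.
/lifecycle frozen

# Volunteer to do the work.
/assign
```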
+ ## Triaging E2E test failures When you submit a change to the Cluster API repository a set of validation jobs is automatically executed by @@ -290,7 +341,7 @@ In case you want to run E2E test locally, please refer to the [Testing](https:// ## Reviewing a Patch -## Reviews +### Reviews > Parts of the following content have been adapted from https://google.github.io/eng-practices/review. @@ -395,6 +446,10 @@ There may, at times, need to be exceptions where breaking changes are allowed in discretion of the project's maintainers, and must be carefully considered before merging. An example of an allowed breaking change might be a fix for a behavioral bug that was released in an initial minor version (such as `v0.3.0`). +## Dependency Licence Management + +Cluster API follows the [license policy of the CNCF](https://github.com/cncf/foundation/blob/main/allowed-third-party-license-policy.md). This sets limits on which +licenses dependencies and other artifacts may use. For Go dependencies, only those listed in `go.mod` are considered dependencies. This is in line with [how dependencies are reviewed in Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/vendor.md#reviewing-and-approving-dependency-changes). ## API conventions @@ -516,6 +571,7 @@ As of today there are following OWNERS files/Owner groups defining sub areas: - [kubeadm Control Plane Provider (KCP)](https://github.com/kubernetes-sigs/cluster-api/tree/main/controlplane/kubeadm) - [Cluster Managed topologies, ClusterClass](https://github.com/kubernetes-sigs/cluster-api/tree/main/internal/controllers/topology) - [Infrastructure Provider Docker (CAPD)](https://github.com/kubernetes-sigs/cluster-api/tree/main/test/infrastructure/docker) +- [Infrastructure Provider in-memory](https://github.com/kubernetes-sigs/cluster-api/tree/main/test/infrastructure/inmemory) - [Test](https://github.com/kubernetes-sigs/cluster-api/tree/main/test) - [Test Framework](https://github.com/kubernetes-sigs/cluster-api/tree/main/test/framework) - [Docs](https://github.com/kubernetes-sigs/cluster-api/tree/main/docs) diff --git a/Makefile b/Makefile index 2c4cababe066..6f10e841228c 100644 --- a/Makefile +++ b/Makefile @@ -23,7 +23,8 @@ SHELL:=/usr/bin/env bash # # Go. # -GO_VERSION ?= 1.19.6 +GO_VERSION ?= 1.22.3 +GO_DIRECTIVE_VERSION ?= 1.22.0 GO_CONTAINER_IMAGE ?= docker.io/library/golang:$(GO_VERSION) # Use GOPROXY environment variable if set @@ -39,7 +40,7 @@ export GO111MODULE=on # # Kubebuilder. 
# -export KUBEBUILDER_ENVTEST_KUBERNETES_VERSION ?= 1.26.0 +export KUBEBUILDER_ENVTEST_KUBERNETES_VERSION ?= 1.30.0 export KUBEBUILDER_CONTROLPLANE_START_TIMEOUT ?= 60s export KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT ?= 60s @@ -59,22 +60,16 @@ BIN_DIR := bin TEST_DIR := test TOOLS_DIR := hack/tools TOOLS_BIN_DIR := $(abspath $(TOOLS_DIR)/$(BIN_DIR)) +DOCS_DIR := docs E2E_FRAMEWORK_DIR := $(TEST_DIR)/framework CAPD_DIR := $(TEST_DIR)/infrastructure/docker +CAPIM_DIR := $(TEST_DIR)/infrastructure/inmemory TEST_EXTENSION_DIR := $(TEST_DIR)/extension GO_INSTALL := ./scripts/go_install.sh OBSERVABILITY_DIR := hack/observability export PATH := $(abspath $(TOOLS_BIN_DIR)):$(PATH) -# Set --output-base for conversion-gen if we are not within GOPATH -ifneq ($(abspath $(ROOT_DIR)),$(shell go env GOPATH)/src/sigs.k8s.io/cluster-api) - CONVERSION_GEN_OUTPUT_BASE := --output-base=$(ROOT_DIR) - CONVERSION_GEN_OUTPUT_BASE_CAPD := --output-base=$(ROOT_DIR)/$(CAPD_DIR) -else - export GOPATH := $(shell go env GOPATH) -endif - # # Ginkgo configuration. # @@ -101,27 +96,31 @@ get_go_version = $(shell go list -m $1 | awk '{print $$2}') # Binaries. # # Note: Need to use abspath so we can invoke these from subdirectories -KUSTOMIZE_VER := v4.5.2 +KUSTOMIZE_VER := v5.3.0 KUSTOMIZE_BIN := kustomize KUSTOMIZE := $(abspath $(TOOLS_BIN_DIR)/$(KUSTOMIZE_BIN)-$(KUSTOMIZE_VER)) -KUSTOMIZE_PKG := sigs.k8s.io/kustomize/kustomize/v4 +KUSTOMIZE_PKG := sigs.k8s.io/kustomize/kustomize/v5 -SETUP_ENVTEST_VER := v0.0.0-20211110210527-619e6b92dab9 +# This is a commit from CR main (22.05.2024). +# Intentionally using a commit from main to use a setup-envtest version +# that uses binaries from controller-tools, not GCS. +# CR PR: https://github.com/kubernetes-sigs/controller-runtime/pull/2811 +SETUP_ENVTEST_VER := v0.0.0-20240522175850-2e9781e9fc60 SETUP_ENVTEST_BIN := setup-envtest SETUP_ENVTEST := $(abspath $(TOOLS_BIN_DIR)/$(SETUP_ENVTEST_BIN)-$(SETUP_ENVTEST_VER)) SETUP_ENVTEST_PKG := sigs.k8s.io/controller-runtime/tools/setup-envtest -CONTROLLER_GEN_VER := v0.11.3 +CONTROLLER_GEN_VER := v0.15.0 CONTROLLER_GEN_BIN := controller-gen CONTROLLER_GEN := $(abspath $(TOOLS_BIN_DIR)/$(CONTROLLER_GEN_BIN)-$(CONTROLLER_GEN_VER)) CONTROLLER_GEN_PKG := sigs.k8s.io/controller-tools/cmd/controller-gen -GOTESTSUM_VER := v1.6.4 +GOTESTSUM_VER := v1.11.0 GOTESTSUM_BIN := gotestsum GOTESTSUM := $(abspath $(TOOLS_BIN_DIR)/$(GOTESTSUM_BIN)-$(GOTESTSUM_VER)) GOTESTSUM_PKG := gotest.tools/gotestsum -CONVERSION_GEN_VER := v0.26.0 +CONVERSION_GEN_VER := v0.30.0 CONVERSION_GEN_BIN := conversion-gen # We are intentionally using the binary without version suffix, to avoid the version # in generated files. @@ -133,56 +132,82 @@ ENVSUBST_VER := $(call get_go_version,github.com/drone/envsubst/v2) ENVSUBST := $(abspath $(TOOLS_BIN_DIR)/$(ENVSUBST_BIN)-$(ENVSUBST_VER)) ENVSUBST_PKG := github.com/drone/envsubst/v2/cmd/envsubst -GO_APIDIFF_VER := v0.6.0 +GO_APIDIFF_VER := v0.8.2 GO_APIDIFF_BIN := go-apidiff GO_APIDIFF := $(abspath $(TOOLS_BIN_DIR)/$(GO_APIDIFF_BIN)-$(GO_APIDIFF_VER)) GO_APIDIFF_PKG := github.com/joelanford/go-apidiff -HADOLINT_VER := v2.10.0 +HADOLINT_VER := v2.12.0 HADOLINT_FAILURE_THRESHOLD = warning SHELLCHECK_VER := v0.9.0 -KPROMO_VER := v3.4.5 +TRIVY_VER := 0.49.1 + +KPROMO_VER := v4.0.5 KPROMO_BIN := kpromo KPROMO := $(abspath $(TOOLS_BIN_DIR)/$(KPROMO_BIN)-$(KPROMO_VER)) -KPROMO_PKG := sigs.k8s.io/promo-tools/v3/cmd/kpromo +# KPROMO_PKG may have to be changed if KPROMO_VER increases its major version. 
+KPROMO_PKG := sigs.k8s.io/promo-tools/v4/cmd/kpromo -YQ_VER := v4.25.2 +YQ_VER := v4.35.2 YQ_BIN := yq YQ := $(abspath $(TOOLS_BIN_DIR)/$(YQ_BIN)-$(YQ_VER)) YQ_PKG := github.com/mikefarah/yq/v4 +PLANTUML_VER := 1.2024.3 + GINKGO_BIN := ginkgo -GINGKO_VER := $(call get_go_version,github.com/onsi/ginkgo/v2) -GINKGO := $(abspath $(TOOLS_BIN_DIR)/$(GINKGO_BIN)-$(GINGKO_VER)) +GINKGO_VER := $(call get_go_version,github.com/onsi/ginkgo/v2) +GINKGO := $(abspath $(TOOLS_BIN_DIR)/$(GINKGO_BIN)-$(GINKGO_VER)) GINKGO_PKG := github.com/onsi/ginkgo/v2/ginkgo +GOLANGCI_LINT_BIN := golangci-lint +GOLANGCI_LINT_VER := $(shell cat .github/workflows/pr-golangci-lint.yaml | grep [[:space:]]version: | sed 's/.*version: //') +GOLANGCI_LINT := $(abspath $(TOOLS_BIN_DIR)/$(GOLANGCI_LINT_BIN)-$(GOLANGCI_LINT_VER)) +GOLANGCI_LINT_PKG := github.com/golangci/golangci-lint/cmd/golangci-lint + +GOVULNCHECK_BIN := govulncheck +GOVULNCHECK_VER := v1.0.4 +GOVULNCHECK := $(abspath $(TOOLS_BIN_DIR)/$(GOVULNCHECK_BIN)-$(GOVULNCHECK_VER)) +GOVULNCHECK_PKG := golang.org/x/vuln/cmd/govulncheck + +IMPORT_BOSS_BIN := import-boss +IMPORT_BOSS_VER := v0.28.1 +IMPORT_BOSS := $(abspath $(TOOLS_BIN_DIR)/$(IMPORT_BOSS_BIN)) +IMPORT_BOSS_PKG := k8s.io/code-generator/cmd/import-boss + +TRIAGE_PARTY_IMAGE_NAME ?= extra/triage-party +TRIAGE_PARTY_CONTROLLER_IMG ?= $(STAGING_REGISTRY)/$(TRIAGE_PARTY_IMAGE_NAME) +TRIAGE_PARTY_DIR := hack/tools/triage +TRIAGE_PARTY_TMP_DIR ?= $(TRIAGE_PARTY_DIR)/triage-party.tmp +TRIAGE_PARTY_VERSION ?= v1.6.0 + CONVERSION_VERIFIER_BIN := conversion-verifier CONVERSION_VERIFIER := $(abspath $(TOOLS_BIN_DIR)/$(CONVERSION_VERIFIER_BIN)) -OPENAPI_GEN_VER := 5e7f5fd +OPENAPI_GEN_VER := dc4e619 # main branch as of 22.04.2024 OPENAPI_GEN_BIN := openapi-gen # We are intentionally using the binary without version suffix, to avoid the version # in generated files. OPENAPI_GEN := $(abspath $(TOOLS_BIN_DIR)/$(OPENAPI_GEN_BIN)) OPENAPI_GEN_PKG := k8s.io/kube-openapi/cmd/openapi-gen +PROWJOB_GEN_BIN := prowjob-gen +PROWJOB_GEN := $(abspath $(TOOLS_BIN_DIR)/$(PROWJOB_GEN_BIN)) + RUNTIME_OPENAPI_GEN_BIN := runtime-openapi-gen RUNTIME_OPENAPI_GEN := $(abspath $(TOOLS_BIN_DIR)/$(RUNTIME_OPENAPI_GEN_BIN)) TILT_PREPARE_BIN := tilt-prepare TILT_PREPARE := $(abspath $(TOOLS_BIN_DIR)/$(TILT_PREPARE_BIN)) -GOLANGCI_LINT_BIN := golangci-lint -GOLANGCI_LINT := $(abspath $(TOOLS_BIN_DIR)/$(GOLANGCI_LINT_BIN)) - # Define Docker related variables. Releases should modify and double check these vars. 
REGISTRY ?= gcr.io/$(shell gcloud config get-value project) PROD_REGISTRY ?= registry.k8s.io/cluster-api STAGING_REGISTRY ?= gcr.io/k8s-staging-cluster-api -STAGING_BUCKET ?= artifacts.k8s-staging-cluster-api.appspot.com +STAGING_BUCKET ?= k8s-staging-cluster-api # core IMAGE_NAME ?= cluster-api-controller @@ -200,6 +225,10 @@ KUBEADM_CONTROL_PLANE_CONTROLLER_IMG ?= $(REGISTRY)/$(KUBEADM_CONTROL_PLANE_IMAG CAPD_IMAGE_NAME ?= capd-manager CAPD_CONTROLLER_IMG ?= $(REGISTRY)/$(CAPD_IMAGE_NAME) +# capim +CAPIM_IMAGE_NAME ?= capim-manager +CAPIM_CONTROLLER_IMG ?= $(REGISTRY)/$(CAPIM_IMAGE_NAME) + # clusterctl CLUSTERCTL_MANIFEST_DIR := cmd/clusterctl/config CLUSTERCTL_IMAGE_NAME ?= clusterctl @@ -216,7 +245,7 @@ CAPI_KIND_CLUSTER_NAME ?= capi-test TAG ?= dev ARCH ?= $(shell go env GOARCH) -ALL_ARCH = amd64 arm arm64 ppc64le s390x +ALL_ARCH ?= amd64 arm arm64 ppc64le s390x # Allow overriding the imagePullPolicy PULL_POLICY ?= Always @@ -234,7 +263,7 @@ LDFLAGS := $(shell hack/version.sh) all: test managers clusterctl help: # Display this help - @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[0-9A-Za-z_-]+:.*?##/ { printf " \033[36m%-45s\033[0m %s\n", $$1, $$2 } /^\$$\([0-9A-Za-z_-]+\):.*?##/ { gsub("_","-", $$1); printf " \033[36m%-45s\033[0m %s\n", tolower(substr($$1, 3, length($$1)-7)), $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[0-9A-Za-z_-]+:.*?##/ { printf " \033[36m%-50s\033[0m %s\n", $$1, $$2 } /^\$$\([0-9A-Za-z_-]+\):.*?##/ { gsub("_","-", $$1); printf " \033[36m%-50s\033[0m %s\n", tolower(substr($$1, 3, length($$1)-7)), $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) ## -------------------------------------- ## Generate / Manifests @@ -242,7 +271,7 @@ help: # Display this help ##@ generate: -ALL_GENERATE_MODULES = core kubeadm-bootstrap kubeadm-control-plane docker-infrastructure +ALL_GENERATE_MODULES = core kubeadm-bootstrap kubeadm-control-plane docker-infrastructure in-memory-infrastructure test-extension .PHONY: generate generate: ## Run all generate-manifests-*, generate-go-deepcopy-*, generate-go-conversions-* and generate-go-openapi targets @@ -253,15 +282,19 @@ generate-manifests: $(addprefix generate-manifests-,$(ALL_GENERATE_MODULES)) ## .PHONY: generate-manifests-core generate-manifests-core: $(CONTROLLER_GEN) $(KUSTOMIZE) ## Generate manifests e.g. CRD, RBAC etc. for core - $(MAKE) clean-generated-yaml SRC_DIRS="./config/crd/bases" + $(MAKE) clean-generated-yaml SRC_DIRS="./config/crd/bases,./config/webhook/manifests.yaml" $(CONTROLLER_GEN) \ + paths=./ \ paths=./api/... \ + paths=./internal/apis/core/... \ paths=./internal/controllers/... \ paths=./internal/webhooks/... \ paths=./$(EXP_DIR)/api/... \ paths=./$(EXP_DIR)/internal/controllers/... \ + paths=./$(EXP_DIR)/internal/webhooks/... \ paths=./$(EXP_DIR)/addons/api/... \ paths=./$(EXP_DIR)/addons/internal/controllers/... \ + paths=./$(EXP_DIR)/addons/internal/webhooks/... \ paths=./$(EXP_DIR)/ipam/api/... \ paths=./$(EXP_DIR)/ipam/internal/webhooks/... \ paths=./$(EXP_DIR)/runtime/api/... \ @@ -279,10 +312,13 @@ generate-manifests-core: $(CONTROLLER_GEN) $(KUSTOMIZE) ## Generate manifests e. .PHONY: generate-manifests-kubeadm-bootstrap generate-manifests-kubeadm-bootstrap: $(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RBAC etc. 
for kubeadm bootstrap - $(MAKE) clean-generated-yaml SRC_DIRS="./bootstrap/kubeadm/config/crd/bases" + $(MAKE) clean-generated-yaml SRC_DIRS="./bootstrap/kubeadm/config/crd/bases,./bootstrap/kubeadm/config/webhook/manifests.yaml" $(CONTROLLER_GEN) \ + paths=./bootstrap/kubeadm \ paths=./bootstrap/kubeadm/api/... \ paths=./bootstrap/kubeadm/internal/controllers/... \ + paths=./bootstrap/kubeadm/internal/webhooks/... \ + paths=./internal/apis/bootstrap/kubeadm/... \ crd:crdVersions=v1 \ rbac:roleName=manager-role \ output:crd:dir=./bootstrap/kubeadm/config/crd/bases \ @@ -292,11 +328,13 @@ generate-manifests-kubeadm-bootstrap: $(CONTROLLER_GEN) ## Generate manifests e. .PHONY: generate-manifests-kubeadm-control-plane generate-manifests-kubeadm-control-plane: $(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RBAC etc. for kubeadm control plane - $(MAKE) clean-generated-yaml SRC_DIRS="./controlplane/kubeadm/config/crd/bases" + $(MAKE) clean-generated-yaml SRC_DIRS="./controlplane/kubeadm/config/crd/bases,./controlplane/kubeadm/config/webhook/manifests.yaml" $(CONTROLLER_GEN) \ + paths=./controlplane/kubeadm \ paths=./controlplane/kubeadm/api/... \ paths=./controlplane/kubeadm/internal/controllers/... \ paths=./controlplane/kubeadm/internal/webhooks/... \ + paths=./internal/apis/controlplane/kubeadm/... \ crd:crdVersions=v1 \ rbac:roleName=manager-role \ output:crd:dir=./controlplane/kubeadm/config/crd/bases \ @@ -306,18 +344,43 @@ generate-manifests-kubeadm-control-plane: $(CONTROLLER_GEN) ## Generate manifest .PHONY: generate-manifests-docker-infrastructure generate-manifests-docker-infrastructure: $(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RBAC etc. for docker infrastructure provider - $(MAKE) clean-generated-yaml SRC_DIRS="$(CAPD_DIR)/config/crd/bases" + $(MAKE) clean-generated-yaml SRC_DIRS="$(CAPD_DIR)/config/crd/bases,$(CAPD_DIR)/config/webhook/manifests.yaml" cd $(CAPD_DIR); $(CONTROLLER_GEN) \ + paths=./ \ paths=./api/... \ paths=./$(EXP_DIR)/api/... \ paths=./$(EXP_DIR)/internal/controllers/... \ + paths=./$(EXP_DIR)/internal/webhooks/... \ + paths=./internal/controllers/... \ + paths=./internal/webhooks/... \ + crd:crdVersions=v1 \ + rbac:roleName=manager-role \ + output:crd:dir=./config/crd/bases \ + output:webhook:dir=./config/webhook \ + webhook + + +.PHONY: generate-manifests-in-memory-infrastructure +generate-manifests-in-memory-infrastructure: $(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RBAC etc. for in-memory infrastructure provider + $(MAKE) clean-generated-yaml SRC_DIRS="$(CAPIM_DIR)/config/crd/bases,$(CAPIM_DIR)/config/webhook/manifests.yaml" + cd $(CAPIM_DIR); $(CONTROLLER_GEN) \ + paths=./ \ + paths=./api/... \ paths=./internal/controllers/... \ + paths=./internal/webhooks/... \ crd:crdVersions=v1 \ rbac:roleName=manager-role \ output:crd:dir=./config/crd/bases \ output:webhook:dir=./config/webhook \ webhook +.PHONY: generate-manifests-test-extension +generate-manifests-test-extension: $(CONTROLLER_GEN) ## Generate manifests e.g. RBAC for test-extension provider + cd ./test/extension; $(CONTROLLER_GEN) \ + paths=./... \ + output:rbac:dir=./config/rbac \ + rbac:roleName=manager-role + .PHONY: generate-go-deepcopy generate-go-deepcopy: ## Run all generate-go-deepcopy-* targets $(MAKE) $(addprefix generate-go-deepcopy-,$(ALL_GENERATE_MODULES)) @@ -360,6 +423,17 @@ generate-go-deepcopy-docker-infrastructure: $(CONTROLLER_GEN) ## Generate deepco paths=./api/... \ paths=./$(EXP_DIR)/api/... 
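# Usage sketch (assuming the targets defined in this Makefile): the aggregate
# generate targets fan out over ALL_GENERATE_MODULES, so generated artifacts can
# be refreshed for every module at once or for a single module, e.g.:
#   make generate
#   make generate-manifests-in-memory-infrastructure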
+.PHONY: generate-go-deepcopy-in-memory-infrastructure +generate-go-deepcopy-in-memory-infrastructure: $(CONTROLLER_GEN) ## Generate deepcopy go code for in-memory infrastructure provider + $(MAKE) clean-generated-deepcopy SRC_DIRS="$(CAPIM_DIR)/api,$(CAPIM_DIR)/internal/cloud/api" + cd $(CAPIM_DIR); $(CONTROLLER_GEN) \ + object:headerFile=../../../hack/boilerplate/boilerplate.generatego.txt \ + paths=./api/... \ + paths=./internal/cloud/api/... + +.PHONY: generate-go-deepcopy-test-extension +generate-go-deepcopy-test-extension: $(CONTROLLER_GEN) ## Generate deepcopy go code for test-extension + .PHONY: generate-go-conversions generate-go-conversions: ## Run all generate-go-conversions-* targets $(MAKE) $(addprefix generate-go-conversions-,$(ALL_GENERATE_MODULES)) @@ -368,100 +442,106 @@ generate-go-conversions: ## Run all generate-go-conversions-* targets generate-go-conversions-core: ## Run all generate-go-conversions-core-* targets $(MAKE) generate-go-conversions-core-api $(MAKE) generate-go-conversions-core-exp + $(MAKE) generate-go-conversions-core-exp-ipam $(MAKE) generate-go-conversions-core-runtime .PHONY: generate-go-conversions-core-api generate-go-conversions-core-api: $(CONVERSION_GEN) ## Generate conversions go code for core api - $(MAKE) clean-generated-conversions SRC_DIRS="./api/v1alpha3,./api/v1alpha4" + $(MAKE) clean-generated-conversions SRC_DIRS="./internal/apis/core/v1alpha3,./internal/apis/core/v1alpha4" $(CONVERSION_GEN) \ - --input-dirs=./api/v1alpha3 \ - --input-dirs=./api/v1alpha4 \ - --build-tag=ignore_autogenerated_core \ - --output-file-base=zz_generated.conversion $(CONVERSION_GEN_OUTPUT_BASE) \ - --go-header-file=./hack/boilerplate/boilerplate.generatego.txt + --output-file=zz_generated.conversion.go \ + --go-header-file=./hack/boilerplate/boilerplate.generatego.txt \ + ./internal/apis/core/v1alpha3 \ + ./internal/apis/core/v1alpha4 .PHONY: generate-go-conversions-core-exp generate-go-conversions-core-exp: $(CONVERSION_GEN) ## Generate conversions go code for core exp - $(MAKE) clean-generated-conversions SRC_DIRS="./$(EXP_DIR)/api/v1alpha3,./$(EXP_DIR)/addons/api/v1alpha3,./$(EXP_DIR)/api/v1alpha4,./$(EXP_DIR)/addons/api/v1alpha4" + $(MAKE) clean-generated-conversions SRC_DIRS="./internal/apis/core/exp/v1alpha3,./internal/apis/core/exp/addons/v1alpha3,./internal/apis/core/exp/v1alpha4,./internal/apis/core/exp/addons/v1alpha4" $(CONVERSION_GEN) \ - --input-dirs=./$(EXP_DIR)/api/v1alpha3 \ - --input-dirs=./$(EXP_DIR)/api/v1alpha4 \ - --input-dirs=./$(EXP_DIR)/addons/api/v1alpha3 \ - --input-dirs=./$(EXP_DIR)/addons/api/v1alpha4 \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1alpha3 \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1alpha4 \ - --output-file-base=zz_generated.conversion $(CONVERSION_GEN_OUTPUT_BASE) \ - --go-header-file=./hack/boilerplate/boilerplate.generatego.txt + --extra-dirs=sigs.k8s.io/cluster-api/internal/apis/core/v1alpha3 \ + --extra-dirs=sigs.k8s.io/cluster-api/internal/apis/core/v1alpha4 \ + --output-file=zz_generated.conversion.go \ + --go-header-file=./hack/boilerplate/boilerplate.generatego.txt \ + ./internal/apis/core/exp/v1alpha3 \ + ./internal/apis/core/exp/v1alpha4 \ + ./internal/apis/core/exp/addons/v1alpha3 \ + ./internal/apis/core/exp/addons/v1alpha4 + +.PHONY: generate-go-conversions-core-exp-ipam +generate-go-conversions-core-exp-ipam: $(CONVERSION_GEN) ## Generate conversions go code for core exp IPAM + $(MAKE) clean-generated-conversions SRC_DIRS="./$(EXP_DIR)/ipam/api/v1alpha1" + $(CONVERSION_GEN) \ + 
--output-file=zz_generated.conversion.go \ + --go-header-file=./hack/boilerplate/boilerplate.generatego.txt \ + ./$(EXP_DIR)/ipam/api/v1alpha1 .PHONY: generate-go-conversions-core-runtime generate-go-conversions-core-runtime: $(CONVERSION_GEN) ## Generate conversions go code for core runtime $(MAKE) clean-generated-conversions SRC_DIRS="./internal/runtime/test/v1alpha1,./internal/runtime/test/v1alpha2" $(CONVERSION_GEN) \ - --input-dirs=./internal/runtime/test/v1alpha1 \ - --input-dirs=./internal/runtime/test/v1alpha2 \ - --build-tag=ignore_autogenerated_runtime \ - --output-file-base=zz_generated.conversion $(CONVERSION_GEN_OUTPUT_BASE) \ - --go-header-file=./hack/boilerplate/boilerplate.generatego.txt + --output-file=zz_generated.conversion.go \ + --go-header-file=./hack/boilerplate/boilerplate.generatego.txt \ + ./internal/runtime/test/v1alpha1 \ + ./internal/runtime/test/v1alpha2 .PHONY: generate-go-conversions-kubeadm-bootstrap generate-go-conversions-kubeadm-bootstrap: $(CONVERSION_GEN) ## Generate conversions go code for kubeadm bootstrap - $(MAKE) clean-generated-conversions SRC_DIRS="./bootstrap/kubeadm/api" + $(MAKE) clean-generated-conversions SRC_DIRS="./internal/apis/bootstrap/kubeadm" $(CONVERSION_GEN) \ - --input-dirs=./bootstrap/kubeadm/api/v1alpha3 \ - --input-dirs=./bootstrap/kubeadm/api/v1alpha4 \ - --build-tag=ignore_autogenerated_kubeadm_bootstrap \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1alpha3 \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1alpha4 \ - --output-file-base=zz_generated.conversion $(CONVERSION_GEN_OUTPUT_BASE) \ - --go-header-file=./hack/boilerplate/boilerplate.generatego.txt - $(MAKE) clean-generated-conversions SRC_DIRS="./bootstrap/kubeadm/types/upstreamv1beta1,./bootstrap/kubeadm/types/upstreamv1beta2,./bootstrap/kubeadm/types/upstreamv1beta3" + --output-file=zz_generated.conversion.go \ + --go-header-file=./hack/boilerplate/boilerplate.generatego.txt \ + ./internal/apis/bootstrap/kubeadm/v1alpha3 \ + ./internal/apis/bootstrap/kubeadm/v1alpha4 + $(MAKE) clean-generated-conversions SRC_DIRS="./bootstrap/kubeadm/types/upstreamv1beta2,./bootstrap/kubeadm/types/upstreamv1beta3" $(CONVERSION_GEN) \ - --input-dirs=./bootstrap/kubeadm/types/upstreamv1beta1 \ - --input-dirs=./bootstrap/kubeadm/types/upstreamv1beta2 \ - --input-dirs=./bootstrap/kubeadm/types/upstreamv1beta3 \ - --build-tag=ignore_autogenerated_kubeadm_types \ - --output-file-base=zz_generated.conversion $(CONVERSION_GEN_OUTPUT_BASE) \ - --go-header-file=./hack/boilerplate/boilerplate.generatego.txt + --output-file=zz_generated.conversion.go \ + --go-header-file=./hack/boilerplate/boilerplate.generatego.txt \ + ./bootstrap/kubeadm/types/upstreamv1beta2 \ + ./bootstrap/kubeadm/types/upstreamv1beta3 .PHONY: generate-go-conversions-kubeadm-control-plane generate-go-conversions-kubeadm-control-plane: $(CONVERSION_GEN) ## Generate conversions go code for kubeadm control plane - $(MAKE) clean-generated-conversions SRC_DIRS="./controlplane/kubeadm/api" + $(MAKE) clean-generated-conversions SRC_DIRS="./internal/apis/controlplane/kubeadm" $(CONVERSION_GEN) \ - --input-dirs=./controlplane/kubeadm/api/v1alpha3 \ - --input-dirs=./controlplane/kubeadm/api/v1alpha4 \ - --build-tag=ignore_autogenerated_kubeadm_controlplane \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1alpha3 \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1alpha4 \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha3 \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4 \ - 
--output-file-base=zz_generated.conversion $(CONVERSION_GEN_OUTPUT_BASE) \ - --go-header-file=./hack/boilerplate/boilerplate.generatego.txt + --extra-dirs=sigs.k8s.io/cluster-api/internal/apis/core/v1alpha3 \ + --extra-dirs=sigs.k8s.io/cluster-api/internal/apis/core/v1alpha4 \ + --extra-dirs=sigs.k8s.io/cluster-api/internal/apis/bootstrap/kubeadm/v1alpha3 \ + --extra-dirs=sigs.k8s.io/cluster-api/internal/apis/bootstrap/kubeadm/v1alpha4 \ + --output-file=zz_generated.conversion.go \ + --go-header-file=./hack/boilerplate/boilerplate.generatego.txt \ + ./internal/apis/controlplane/kubeadm/v1alpha3 \ + ./internal/apis/controlplane/kubeadm/v1alpha4 .PHONY: generate-go-conversions-docker-infrastructure generate-go-conversions-docker-infrastructure: $(CONVERSION_GEN) ## Generate conversions go code for docker infrastructure provider cd $(CAPD_DIR); $(CONVERSION_GEN) \ - --input-dirs=./api/v1alpha3 \ - --input-dirs=./api/v1alpha4 \ - --input-dirs=./$(EXP_DIR)/api/v1alpha3 \ - --input-dirs=./$(EXP_DIR)/api/v1alpha4 \ - --build-tag=ignore_autogenerated_capd \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1alpha3 \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1alpha4 \ - --output-file-base=zz_generated.conversion $(CONVERSION_GEN_OUTPUT_BASE_CAPD) \ - --go-header-file=../../../hack/boilerplate/boilerplate.generatego.txt + --output-file=zz_generated.conversion.go \ + --go-header-file=../../../hack/boilerplate/boilerplate.generatego.txt \ + ./api/v1alpha3 \ + ./api/v1alpha4 \ + ./$(EXP_DIR)/api/v1alpha3 \ + ./$(EXP_DIR)/api/v1alpha4 + +.PHONY: generate-go-conversions-in-memory-infrastructure +generate-go-conversions-in-memory-infrastructure: $(CONVERSION_GEN) ## Generate conversions go code for in-memory infrastructure provider + cd $(CAPIM_DIR) + +.PHONY: generate-go-conversions-test-extension +generate-go-conversions-test-extension: $(CONVERSION_GEN) ## Generate conversions go code for test extension # The tmp/sigs.k8s.io/cluster-api symlink is a workaround to make this target run outside of GOPATH .PHONY: generate-go-openapi generate-go-openapi: $(OPENAPI_GEN) $(CONTROLLER_GEN) ## Generate openapi go code for runtime SDK @mkdir -p ./tmp/sigs.k8s.io; ln -s $(ROOT_DIR) ./tmp/sigs.k8s.io/; cd ./tmp; \ for pkg in "api/v1beta1" "$(EXP_DIR)/runtime/hooks/api/v1alpha1"; do \ - $(MAKE) clean-generated-openapi-definitions SRC_DIRS="./$${pkg}"; \ + (cd ../ && $(MAKE) clean-generated-openapi-definitions SRC_DIRS="./$${pkg}"); \ echo "** Generating openapi schema for types in ./$${pkg} **"; \ $(OPENAPI_GEN) \ - --input-dirs=sigs.k8s.io/cluster-api/$${pkg} \ - --output-file-base=zz_generated.openapi \ - --output-package=sigs.k8s.io/cluster-api/$${pkg} \ - --go-header-file=../hack/boilerplate/boilerplate.generatego.txt; \ + --output-dir=../$${pkg} \ + --output-file=zz_generated.openapi.go \ + --output-pkg=sigs.k8s.io/cluster-api/$${pkg} \ + --go-header-file=../hack/boilerplate/boilerplate.generatego.txt \ + sigs.k8s.io/cluster-api/$${pkg}; \ done; \ rm sigs.k8s.io/cluster-api @@ -471,10 +551,15 @@ generate-modules: ## Run go mod tidy to ensure modules are up to date cd $(TOOLS_DIR); go mod tidy cd $(TEST_DIR); go mod tidy +.PHONY: generate-doctoc +generate-doctoc: + TRACE=$(TRACE) ./hack/generate-doctoc.sh + .PHONY: generate-e2e-templates -generate-e2e-templates: $(KUSTOMIZE) $(addprefix generate-e2e-templates-, v0.3 v0.4 v1.0 v1.2 v1.3 main) ## Generate cluster templates for all versions +generate-e2e-templates: $(KUSTOMIZE) $(addprefix generate-e2e-templates-, v0.3 v0.4 v1.0 v1.5 v1.6
main) ## Generate cluster templates for all versions DOCKER_TEMPLATES := test/e2e/data/infrastructure-docker +INMEMORY_TEMPLATES := test/e2e/data/infrastructure-inmemory .PHONY: generate-e2e-templates-v0.3 generate-e2e-templates-v0.3: $(KUSTOMIZE) @@ -488,15 +573,15 @@ generate-e2e-templates-v0.4: $(KUSTOMIZE) generate-e2e-templates-v1.0: $(KUSTOMIZE) $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.0/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.0/cluster-template.yaml -.PHONY: generate-e2e-templates-v1.2 -generate-e2e-templates-v1.2: $(KUSTOMIZE) - $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.2/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.2/cluster-template.yaml - $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.2/cluster-template-topology --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.2/cluster-template-topology.yaml +.PHONY: generate-e2e-templates-v1.5 +generate-e2e-templates-v1.5: $(KUSTOMIZE) + $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.5/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.5/cluster-template.yaml + $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.5/cluster-template-topology --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.5/cluster-template-topology.yaml -.PHONY: generate-e2e-templates-v1.3 -generate-e2e-templates-v1.3: $(KUSTOMIZE) - $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.3/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.3/cluster-template.yaml - $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.3/cluster-template-topology --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.3/cluster-template-topology.yaml +.PHONY: generate-e2e-templates-v1.6 +generate-e2e-templates-v1.6: $(KUSTOMIZE) + $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.6/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.6/cluster-template.yaml + $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.6/cluster-template-topology --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.6/cluster-template-topology.yaml .PHONY: generate-e2e-templates-main generate-e2e-templates-main: $(KUSTOMIZE) @@ -509,36 +594,52 @@ generate-e2e-templates-main: $(KUSTOMIZE) $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-machine-pool --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-machine-pool.yaml $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-node-drain --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-node-drain.yaml $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-upgrades --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-upgrades.yaml - $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-upgrades-cgroupfs --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-upgrades-cgroupfs.yaml $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-upgrades-runtimesdk --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-upgrades-runtimesdk.yaml $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-kcp-scale-in --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-kcp-scale-in.yaml $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-ipv6 --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-ipv6.yaml + $(KUSTOMIZE) build 
$(DOCKER_TEMPLATES)/main/cluster-template-topology-dualstack-ipv6-primary --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-topology-dualstack-ipv6-primary.yaml + $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-topology-dualstack-ipv4-primary --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-topology-dualstack-ipv4-primary.yaml $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-topology-single-node-cluster --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-topology-single-node-cluster.yaml + $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-topology-autoscaler --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-topology-autoscaler.yaml $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-topology --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-topology.yaml $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-ignition --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-ignition.yaml -.PHONY: generate-test-extension-deployment -generate-test-extension-deployment: $(KUSTOMIZE) - mkdir -p test/e2e/data/test-extension - $(KUSTOMIZE) build test/extension/config/default > test/e2e/data/test-extension/deployment.yaml + $(KUSTOMIZE) build $(INMEMORY_TEMPLATES)/main/cluster-template --load-restrictor LoadRestrictionsNone > $(INMEMORY_TEMPLATES)/main/cluster-template.yaml .PHONY: generate-metrics-config -generate-metrics-config: $(ENVSUBST_BIN) ## Generate ./hack/observability/kube-state-metrics/crd-config.yaml - OUTPUT_FILE="${OBSERVABILITY_DIR}/kube-state-metrics/crd-config.yaml"; \ - METRICS_DIR="${OBSERVABILITY_DIR}/kube-state-metrics/metrics"; \ +generate-metrics-config: $(ENVSUBST_BIN) ## Generate ./config/metrics/crd-metrics-config.yaml + OUTPUT_FILE="./config/metrics/crd-metrics-config.yaml"; \ + METRIC_TEMPLATES_DIR="./config/metrics/templates"; \ echo "# This file was auto-generated via: make generate-metrics-config" > "$${OUTPUT_FILE}"; \ - cat "$${METRICS_DIR}/header.yaml" >> "$${OUTPUT_FILE}"; \ - for resource in cluster kubeadmcontrolplane machine machinedeployment machinehealthcheck machineset machinepool; do \ - cat "$${METRICS_DIR}/$${resource}.yaml"; \ - RESOURCE="$${resource}" ${ENVSUBST_BIN} < "$${METRICS_DIR}/common_metrics.yaml"; \ + cat "$${METRIC_TEMPLATES_DIR}/header.yaml" >> "$${OUTPUT_FILE}"; \ + for resource in clusterclass cluster kubeadmcontrolplane kubeadmconfig machine machinedeployment machinehealthcheck machineset machinepool; do \ + cat "$${METRIC_TEMPLATES_DIR}/$${resource}.yaml"; \ + RESOURCE="$${resource}" ${ENVSUBST_BIN} < "$${METRIC_TEMPLATES_DIR}/common_metrics.yaml"; \ if [[ "$${resource}" != "cluster" ]]; then \ - cat "$${METRICS_DIR}/owner_metric.yaml"; \ + cat "$${METRIC_TEMPLATES_DIR}/owner_metric.yaml"; \ fi \ done >> "$${OUTPUT_FILE}"; \ .PHONY: generate-diagrams generate-diagrams: ## Generate diagrams for *.plantuml files - $(MAKE) -C docs diagrams + $(MAKE) generate-diagrams-book + $(MAKE) generate-diagrams-proposals + +.PHONY: generate-diagrams-book +generate-diagrams-book: ## Generate diagrams for *.plantuml files in book + docker run -v $(ROOT_DIR)/$(DOCS_DIR):/$(DOCS_DIR)$(DOCKER_VOL_OPTS) plantuml/plantuml:$(PLANTUML_VER) /$(DOCS_DIR)/book/**/*.plantuml + +.PHONY: generate-diagrams-proposals +generate-diagrams-proposals: ## Generate diagrams for *.plantuml files in proposals + docker run -v 
$(ROOT_DIR)/$(DOCS_DIR):/$(DOCS_DIR)$(DOCKER_VOL_OPTS) plantuml/plantuml:$(PLANTUML_VER) /$(DOCS_DIR)/proposals/**/*.plantuml + +.PHONY: generate-test-infra-prowjobs +generate-test-infra-prowjobs: $(PROWJOB_GEN) ## Generates the prowjob configurations in test-infra + @if [ -z "${TEST_INFRA_DIR}" ]; then echo "TEST_INFRA_DIR is not set"; exit 1; fi + $(PROWJOB_GEN) \ + -config "$(TEST_INFRA_DIR)/config/jobs/kubernetes-sigs/cluster-api/cluster-api-prowjob-gen.yaml" \ + -templates-dir "$(TEST_INFRA_DIR)/config/jobs/kubernetes-sigs/cluster-api/templates" \ + -output-dir "$(TEST_INFRA_DIR)/config/jobs/kubernetes-sigs/cluster-api" ## -------------------------------------- ## Lint / Verify @@ -549,13 +650,13 @@ generate-diagrams: ## Generate diagrams for *.plantuml files .PHONY: lint lint: $(GOLANGCI_LINT) ## Lint the codebase $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS) - cd $(TEST_DIR); $(GOLANGCI_LINT) run --path-prefix $(TEST_DIR) -v $(GOLANGCI_LINT_EXTRA_ARGS) - cd $(TOOLS_DIR); $(GOLANGCI_LINT) run --path-prefix $(TOOLS_DIR) -v $(GOLANGCI_LINT_EXTRA_ARGS) - ./scripts/ci-lint-dockerfiles.sh $(HADOLINT_VER) $(HADOLINT_FAILURE_THRESHOLD) + cd $(TEST_DIR); $(GOLANGCI_LINT) run --path-prefix $(TEST_DIR) --config $(ROOT_DIR)/.golangci.yml -v $(GOLANGCI_LINT_EXTRA_ARGS) + cd $(TOOLS_DIR); $(GOLANGCI_LINT) run --path-prefix $(TOOLS_DIR) --config $(ROOT_DIR)/.golangci.yml -v $(GOLANGCI_LINT_EXTRA_ARGS) + ./scripts/lint-dockerfiles.sh $(HADOLINT_VER) $(HADOLINT_FAILURE_THRESHOLD) .PHONY: lint-dockerfiles lint-dockerfiles: - ./scripts/ci-lint-dockerfiles.sh $(HADOLINT_VER) $(HADOLINT_FAILURE_THRESHOLD) + ./scripts/lint-dockerfiles.sh $(HADOLINT_VER) $(HADOLINT_FAILURE_THRESHOLD) .PHONY: lint-fix lint-fix: $(GOLANGCI_LINT) ## Lint the codebase and run auto-fixers if supported by the linter @@ -571,11 +672,15 @@ APIDIFF_OLD_COMMIT ?= $(shell git rev-parse origin/main) apidiff: $(GO_APIDIFF) ## Check for API differences $(GO_APIDIFF) $(APIDIFF_OLD_COMMIT) --print-compatible -ALL_VERIFY_CHECKS = doctoc boilerplate shellcheck tiltfile modules gen conversions capi-book-summary +ALL_VERIFY_CHECKS = licenses boilerplate shellcheck tiltfile modules gen conversions doctoc capi-book-summary diagrams import-restrictions go-directive .PHONY: verify verify: $(addprefix verify-,$(ALL_VERIFY_CHECKS)) lint-dockerfiles ## Run all verify-* targets +.PHONY: verify-go-directive +verify-go-directive: + TRACE=$(TRACE) ./hack/verify-go-directive.sh -g $(GO_DIRECTIVE_VERSION) + .PHONY: verify-modules verify-modules: generate-modules ## Verify go modules are up to date @if !(git diff --quiet HEAD -- go.sum go.mod $(TOOLS_DIR)/go.mod $(TOOLS_DIR)/go.sum $(TEST_DIR)/go.mod $(TEST_DIR)/go.sum); then \ @@ -599,8 +704,11 @@ verify-conversions: $(CONVERSION_VERIFIER) ## Verifies expected API conversion $(CONVERSION_VERIFIER) .PHONY: verify-doctoc -verify-doctoc: - TRACE=$(TRACE) ./hack/verify-doctoc.sh +verify-doctoc: generate-doctoc + @if !(git diff --quiet HEAD); then \ + git diff; \ + echo "doctoc is out of date, run make generate-doctoc"; exit 1; \ + fi .PHONY: verify-capi-book-summary verify-capi-book-summary: @@ -620,7 +728,40 @@ verify-tiltfile: ## Verify Tiltfile format .PHONY: verify-container-images verify-container-images: ## Verify container images - TRACE=$(TRACE) ./hack/verify-container-images.sh + TRACE=$(TRACE) ./hack/verify-container-images.sh $(TRIVY_VER) + +.PHONY: verify-licenses +verify-licenses: ## Verify licenses + TRACE=$(TRACE) ./hack/verify-licenses.sh $(TRIVY_VER) + +.PHONY: verify-govulncheck 
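+# Scans the root, hack/tools, and test modules; each exit code is captured so all three runs complete before the target decides whether to fail.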
+verify-govulncheck: $(GOVULNCHECK) ## Verify code for vulnerabilities + $(GOVULNCHECK) ./... && R1=$$? || R1=$$?; \ + $(GOVULNCHECK) -C "$(TOOLS_DIR)" ./... && R2=$$? || R2=$$?; \ + $(GOVULNCHECK) -C "$(TEST_DIR)" ./... && R3=$$? || R3=$$?; \ + if [ "$$R1" -ne "0" ] || [ "$$R2" -ne "0" ] || [ "$$R3" -ne "0" ]; then \ + exit 1; \ + fi + +.PHONY: verify-diagrams +verify-diagrams: generate-diagrams ## Verify generated diagrams are up to date + @if !(git diff --quiet HEAD); then \ + git diff; \ + echo "generated diagrams are out of date, run make generate-diagrams"; exit 1; \ + fi + +.PHONY: verify-security +verify-security: ## Verify code and images for vulnerabilities + $(MAKE) verify-container-images && R1=$$? || R1=$$?; \ + $(MAKE) verify-govulncheck && R2=$$? || R2=$$?; \ + if [ "$$R1" -ne "0" ] || [ "$$R2" -ne "0" ]; then \ + echo "Check for vulnerabilities failed! There are vulnerabilities to be fixed"; \ + exit 1; \ + fi + +.PHONY: verify-import-restrictions +verify-import-restrictions: $(IMPORT_BOSS) ## Verify import restrictions with import-boss + ./hack/verify-import-restrictions.sh ## -------------------------------------- ## Binaries @@ -632,7 +773,7 @@ verify-container-images: ## Verify container images clusterctl: ## Build the clusterctl binary go build -trimpath -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/clusterctl sigs.k8s.io/cluster-api/cmd/clusterctl -ALL_MANAGERS = core kubeadm-bootstrap kubeadm-control-plane docker-infrastructure +ALL_MANAGERS = core kubeadm-bootstrap kubeadm-control-plane docker-infrastructure in-memory-infrastructure .PHONY: managers managers: $(addprefix manager-,$(ALL_MANAGERS)) ## Run all manager-* targets @@ -653,6 +794,10 @@ manager-kubeadm-control-plane: ## Build the kubeadm control plane manager binary manager-docker-infrastructure: ## Build the docker infrastructure manager binary into the ./bin folder cd $(CAPD_DIR); go build -trimpath -ldflags "$(LDFLAGS)" -o ../../../$(BIN_DIR)/capd-manager sigs.k8s.io/cluster-api/test/infrastructure/docker +.PHONY: manager-in-memory-infrastructure +manager-in-memory-infrastructure: ## Build the in-memory infrastructure manager binary into the ./bin folder + cd $(CAPIM_DIR); go build -trimpath -ldflags "$(LDFLAGS)" -o ../../../$(BIN_DIR)/capim-manager sigs.k8s.io/cluster-api/test/infrastructure/inmemory + .PHONY: docker-pull-prerequisites docker-pull-prerequisites: docker pull docker.io/docker/dockerfile:1.4 @@ -665,13 +810,14 @@ docker-build-all: $(addprefix docker-build-,$(ALL_ARCH)) ## Build docker images docker-build-%: $(MAKE) ARCH=$* docker-build -ALL_DOCKER_BUILD = core kubeadm-bootstrap kubeadm-control-plane docker-infrastructure test-extension clusterctl +# Choice of images to build/push +ALL_DOCKER_BUILD ?= core kubeadm-bootstrap kubeadm-control-plane docker-infrastructure in-memory-infrastructure test-extension clusterctl .PHONY: docker-build docker-build: docker-pull-prerequisites ## Run docker-build-* targets for all the images $(MAKE) ARCH=$(ARCH) $(addprefix docker-build-,$(ALL_DOCKER_BUILD)) -ALL_DOCKER_BUILD_E2E = core kubeadm-bootstrap kubeadm-control-plane docker-infrastructure test-extension +ALL_DOCKER_BUILD_E2E = core kubeadm-bootstrap kubeadm-control-plane docker-infrastructure in-memory-infrastructure test-extension .PHONY: docker-build-e2e docker-build-e2e: ## Run docker-build-* targets for all the images with settings to be used for the e2e tests @@ -681,35 +827,48 @@ docker-build-e2e: ## Run docker-build-* targets for all the images with settings .PHONY: docker-build-core
docker-build-core: ## Build the docker image for core controller manager - DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg ldflags="$(LDFLAGS)" . -t $(CONTROLLER_IMG)-$(ARCH):$(TAG) +## reads Dockerfile from stdin to avoid an incorrectly cached Dockerfile (https://github.com/moby/buildkit/issues/1368) + cat ./Dockerfile | DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg ldflags="$(LDFLAGS)" . -t $(CONTROLLER_IMG)-$(ARCH):$(TAG) --file - $(MAKE) set-manifest-image MANIFEST_IMG=$(CONTROLLER_IMG)-$(ARCH) MANIFEST_TAG=$(TAG) TARGET_RESOURCE="./config/default/manager_image_patch.yaml" $(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./config/default/manager_pull_policy.yaml" .PHONY: docker-build-kubeadm-bootstrap docker-build-kubeadm-bootstrap: ## Build the docker image for kubeadm bootstrap controller manager - DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg package=./bootstrap/kubeadm --build-arg ldflags="$(LDFLAGS)" . -t $(KUBEADM_BOOTSTRAP_CONTROLLER_IMG)-$(ARCH):$(TAG) +## reads Dockerfile from stdin to avoid an incorrectly cached Dockerfile (https://github.com/moby/buildkit/issues/1368) + cat ./Dockerfile | DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg package=./bootstrap/kubeadm --build-arg ldflags="$(LDFLAGS)" . -t $(KUBEADM_BOOTSTRAP_CONTROLLER_IMG)-$(ARCH):$(TAG) --file - $(MAKE) set-manifest-image MANIFEST_IMG=$(KUBEADM_BOOTSTRAP_CONTROLLER_IMG)-$(ARCH) MANIFEST_TAG=$(TAG) TARGET_RESOURCE="./bootstrap/kubeadm/config/default/manager_image_patch.yaml" $(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./bootstrap/kubeadm/config/default/manager_pull_policy.yaml" .PHONY: docker-build-kubeadm-control-plane docker-build-kubeadm-control-plane: ## Build the docker image for kubeadm control plane controller manager - DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg package=./controlplane/kubeadm --build-arg ldflags="$(LDFLAGS)" . -t $(KUBEADM_CONTROL_PLANE_CONTROLLER_IMG)-$(ARCH):$(TAG) +## reads Dockerfile from stdin to avoid an incorrectly cached Dockerfile (https://github.com/moby/buildkit/issues/1368) + cat ./Dockerfile | DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg package=./controlplane/kubeadm --build-arg ldflags="$(LDFLAGS)" . -t $(KUBEADM_CONTROL_PLANE_CONTROLLER_IMG)-$(ARCH):$(TAG) --file - $(MAKE) set-manifest-image MANIFEST_IMG=$(KUBEADM_CONTROL_PLANE_CONTROLLER_IMG)-$(ARCH) MANIFEST_TAG=$(TAG) TARGET_RESOURCE="./controlplane/kubeadm/config/default/manager_image_patch.yaml" $(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./controlplane/kubeadm/config/default/manager_pull_policy.yaml" .PHONY: docker-build-docker-infrastructure docker-build-docker-infrastructure: ## Build the docker image for docker infrastructure controller manager - cd $(CAPD_DIR); DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg ldflags="$(LDFLAGS)" ../../.. 
-t $(CAPD_CONTROLLER_IMG)-$(ARCH):$(TAG) --file Dockerfile +## reads Dockerfile from stdin to avoid an incorrectly cached Dockerfile (https://github.com/moby/buildkit/issues/1368) + cat $(CAPD_DIR)/Dockerfile | DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg ldflags="$(LDFLAGS)" . -t $(CAPD_CONTROLLER_IMG)-$(ARCH):$(TAG) --file - $(MAKE) set-manifest-image MANIFEST_IMG=$(CAPD_CONTROLLER_IMG)-$(ARCH) MANIFEST_TAG=$(TAG) TARGET_RESOURCE="$(CAPD_DIR)/config/default/manager_image_patch.yaml" $(MAKE) set-manifest-pull-policy TARGET_RESOURCE="$(CAPD_DIR)/config/default/manager_pull_policy.yaml" +.PHONY: docker-build-in-memory-infrastructure +docker-build-in-memory-infrastructure: ## Build the docker image for in-memory infrastructure controller manager +## reads Dockerfile from stdin to avoid an incorrectly cached Dockerfile (https://github.com/moby/buildkit/issues/1368) + cat $(CAPIM_DIR)/Dockerfile | DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg ldflags="$(LDFLAGS)" . -t $(CAPIM_CONTROLLER_IMG)-$(ARCH):$(TAG) --file - + $(MAKE) set-manifest-image MANIFEST_IMG=$(CAPIM_CONTROLLER_IMG)-$(ARCH) MANIFEST_TAG=$(TAG) TARGET_RESOURCE="$(CAPIM_DIR)/config/default/manager_image_patch.yaml" + $(MAKE) set-manifest-pull-policy TARGET_RESOURCE="$(CAPIM_DIR)/config/default/manager_pull_policy.yaml" + .PHONY: docker-build-clusterctl docker-build-clusterctl: ## Build the docker image for clusterctl - DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg package=./cmd/clusterctl --build-arg ldflags="$(LDFLAGS)" -f ./cmd/clusterctl/Dockerfile . -t $(CLUSTERCTL_IMG)-$(ARCH):$(TAG) +## reads Dockerfile from stdin to avoid an incorrectly cached Dockerfile (https://github.com/moby/buildkit/issues/1368) + cat ./cmd/clusterctl/Dockerfile | DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg package=./cmd/clusterctl --build-arg ldflags="$(LDFLAGS)" . -t $(CLUSTERCTL_IMG)-$(ARCH):$(TAG) --file - .PHONY: docker-build-test-extension docker-build-test-extension: ## Build the docker image for the test extension - DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg ldflags="$(LDFLAGS)" . -t $(TEST_EXTENSION_IMG)-$(ARCH):$(TAG) --file ./test/extension/Dockerfile +## reads Dockerfile from stdin to avoid an incorrectly cached Dockerfile (https://github.com/moby/buildkit/issues/1368) + cat ./test/extension/Dockerfile | DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg ldflags="$(LDFLAGS)" .
-t $(TEST_EXTENSION_IMG)-$(ARCH):$(TAG) --file - $(MAKE) set-manifest-image MANIFEST_IMG=$(TEST_EXTENSION_IMG)-$(ARCH) MANIFEST_TAG=$(TAG) TARGET_RESOURCE="./test/extension/config/default/manager_image_patch.yaml" $(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./test/extension/config/default/manager_pull_policy.yaml" @@ -769,6 +928,20 @@ test-docker-infrastructure-junit: $(SETUP_ENVTEST) $(GOTESTSUM) ## Run unit and $(GOTESTSUM) --junitfile $(ARTIFACTS)/junit.infra_docker.xml --raw-command cat $(ARTIFACTS)/junit.infra_docker.stdout exit $$(cat $(ARTIFACTS)/junit.infra_docker.exitcode) +.PHONY: test-in-memory-infrastructure +test-in-memory-infrastructure: $(SETUP_ENVTEST) ## Run unit and integration tests for in-memory infrastructure provider + cd $(CAPIM_DIR); KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test ./... $(TEST_ARGS) + +.PHONY: test-in-memory-infrastructure-verbose +test-in-memory-infrastructure-verbose: ## Run unit and integration tests for in-memory infrastructure provider with verbose flag + $(MAKE) test-in-memory-infrastructure TEST_ARGS="$(TEST_ARGS) -v" + +.PHONY: test-in-memory-infrastructure-junit +test-in-memory-infrastructure-junit: $(SETUP_ENVTEST) $(GOTESTSUM) ## Run unit and integration tests and generate a junit report for in-memory infrastructure provider + cd $(CAPIM_DIR); set +o errexit; (KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test -json ./... $(TEST_ARGS); echo $$? > $(ARTIFACTS)/junit.infra_inmemory.exitcode) | tee $(ARTIFACTS)/junit.infra_inmemory.stdout + $(GOTESTSUM) --junitfile $(ARTIFACTS)/junit.infra_inmemory.xml --raw-command cat $(ARTIFACTS)/junit.infra_inmemory.stdout + exit $$(cat $(ARTIFACTS)/junit.infra_inmemory.exitcode) + .PHONY: test-test-extension test-test-extension: $(SETUP_ENVTEST) ## Run unit and integration tests for the test extension cd $(TEST_EXTENSION_DIR); KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test ./... $(TEST_ARGS) @@ -784,7 +957,7 @@ test-test-extension-junit: $(SETUP_ENVTEST) $(GOTESTSUM) ## Run unit and integra exit $$(cat $(ARTIFACTS)/junit.test_extension.exitcode) .PHONY: test-e2e -test-e2e: $(GINKGO) generate-e2e-templates generate-test-extension-deployment ## Run the end-to-end tests +test-e2e: $(GINKGO) generate-e2e-templates ## Run the end-to-end tests $(GINKGO) -v --trace -poll-progress-after=$(GINKGO_POLL_PROGRESS_AFTER) \ -poll-progress-interval=$(GINKGO_POLL_PROGRESS_INTERVAL) --tags=e2e --focus="$(GINKGO_FOCUS)" \ $(_SKIP_ARGS) --nodes=$(GINKGO_NODES) --timeout=$(GINKGO_TIMEOUT) --no-color=$(GINKGO_NOCOLOR) \ @@ -798,6 +971,11 @@ test-e2e: $(GINKGO) generate-e2e-templates generate-test-extension-deployment ## kind-cluster: ## Create a new kind cluster designed for development with Tilt hack/kind-install-for-capd.sh +.PHONY: tilt-e2e-prerequisites +tilt-e2e-prerequisites: ## Build the corresponding kindest/node images required for e2e testing and generate the e2e templates + scripts/build-kind.sh + $(MAKE) generate-e2e-templates + .PHONY: tilt-up tilt-up: kind-cluster ## Start tilt and build kind cluster if needed. 
tilt up @@ -822,7 +1000,7 @@ PREVIOUS_TAG ?= $(shell git tag -l | grep -E "^v[0-9]+\.[0-9]+\.[0-9]+$$" | sort ## set by Prow, ref name of the base branch, e.g., main RELEASE_ALIAS_TAG := $(PULL_BASE_REF) RELEASE_DIR := out -RELEASE_NOTES_DIR := _releasenotes +RELEASE_NOTES_DIR := CHANGELOG USER_FORK ?= $(shell git config --get remote.origin.url | cut -d/ -f4) # only works on https://github.com//cluster-api.git style URLs ifeq ($(USER_FORK),) USER_FORK := $(shell git config --get remote.origin.url | cut -d: -f2 | cut -d/ -f1) # for git@github.com:/cluster-api.git style URLs @@ -881,6 +1059,14 @@ manifest-modification-dev: # Set the manifest images to the staging bucket. MANIFEST_IMG=$(REGISTRY)/$(CAPD_IMAGE_NAME) MANIFEST_TAG=$(RELEASE_TAG) \ TARGET_RESOURCE="$(CAPD_DIR)/config/default/manager_image_patch.yaml" $(MAKE) set-manifest-pull-policy PULL_POLICY=IfNotPresent TARGET_RESOURCE="$(CAPD_DIR)/config/default/manager_pull_policy.yaml" + $(MAKE) set-manifest-image \ + MANIFEST_IMG=$(REGISTRY)/$(CAPIM_IMAGE_NAME) MANIFEST_TAG=$(RELEASE_TAG) \ + TARGET_RESOURCE="$(CAPIM_DIR)/config/default/manager_image_patch.yaml" + $(MAKE) set-manifest-pull-policy PULL_POLICY=IfNotPresent TARGET_RESOURCE="$(CAPIM_DIR)/config/default/manager_pull_policy.yaml" + $(MAKE) set-manifest-image \ + MANIFEST_IMG=$(REGISTRY)/$(TEST_EXTENSION_IMAGE_NAME) MANIFEST_TAG=$(RELEASE_TAG) \ + TARGET_RESOURCE="$(TEST_EXTENSION_DIR)/config/default/manager_image_patch.yaml" + $(MAKE) set-manifest-pull-policy PULL_POLICY=IfNotPresent TARGET_RESOURCE="$(TEST_EXTENSION_DIR)/config/default/manager_pull_policy.yaml" .PHONY: release-manifests @@ -908,6 +1094,9 @@ release-manifests: $(RELEASE_DIR) $(KUSTOMIZE) $(RUNTIME_OPENAPI_GEN) ## Build t release-manifests-dev: $(RELEASE_DIR) $(KUSTOMIZE) ## Build the development manifests and copy them into the release folder cd $(CAPD_DIR); $(KUSTOMIZE) build config/default > ../../../$(RELEASE_DIR)/infrastructure-components-development.yaml cp $(CAPD_DIR)/templates/* $(RELEASE_DIR)/ + cd $(CAPIM_DIR); $(KUSTOMIZE) build config/default > ../../../$(RELEASE_DIR)/infrastructure-components-in-memory-development.yaml + cp $(CAPIM_DIR)/templates/* $(RELEASE_DIR)/ + cd $(TEST_EXTENSION_DIR); $(KUSTOMIZE) build config/default > ../../$(RELEASE_DIR)/runtime-extension-components-development.yaml .PHONY: release-binaries release-binaries: ## Build the binaries to publish with a release @@ -935,7 +1124,21 @@ release-binary: $(RELEASE_DIR) .PHONY: release-staging release-staging: ## Build and push container images to the staging bucket - REGISTRY=$(STAGING_REGISTRY) $(MAKE) docker-build-all docker-push-all release-alias-tag + REGISTRY=$(STAGING_REGISTRY) $(MAKE) docker-build-all + REGISTRY=$(STAGING_REGISTRY) $(MAKE) docker-image-verify + REGISTRY=$(STAGING_REGISTRY) $(MAKE) docker-push-all + REGISTRY=$(STAGING_REGISTRY) $(MAKE) release-alias-tag + # Set the manifest image to the staging bucket. + $(MAKE) manifest-modification REGISTRY=$(STAGING_REGISTRY) RELEASE_TAG=$(RELEASE_ALIAS_TAG) + ## Build the manifests + $(MAKE) release-manifests + # Set the manifest image to the staging bucket. + $(MAKE) manifest-modification-dev REGISTRY=$(STAGING_REGISTRY) RELEASE_TAG=$(RELEASE_ALIAS_TAG) + ## Build the dev manifests + $(MAKE) release-manifests-dev + # Example manifest location: https://storage.googleapis.com/k8s-staging-cluster-api/components/main/core-components.yaml + # Please note that these files are deleted after a certain period, at the time of this writing 60 days after file creation.
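+ # The copy below publishes every file in $(RELEASE_DIR) to the staging bucket under the release alias tag.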
+ gsutil cp $(RELEASE_DIR)/* gs://$(STAGING_BUCKET)/components/$(RELEASE_ALIAS_TAG) .PHONY: release-staging-nightly release-staging-nightly: ## Tag and push container images to the staging bucket. Example image tag: cluster-api-controller:nightly_main_20210121 @@ -950,7 +1153,8 @@ release-staging-nightly: ## Tag and push container images to the staging bucket. $(MAKE) manifest-modification-dev REGISTRY=$(STAGING_REGISTRY) RELEASE_TAG=$(NEW_RELEASE_ALIAS_TAG) ## Build the dev manifests $(MAKE) release-manifests-dev - # Example manifest location: artifacts.k8s-staging-cluster-api.appspot.com/components/nightly_main_20210121/bootstrap-components.yaml + # Example manifest location: https://storage.googleapis.com/k8s-staging-cluster-api/components/nightly_main_20240425/core-components.yaml + # Please note that these files are deleted after a certain period, at the time of this writing 60 days after file creation. gsutil cp $(RELEASE_DIR)/* gs://$(STAGING_BUCKET)/components/$(NEW_RELEASE_ALIAS_TAG) .PHONY: release-alias-tag @@ -960,15 +1164,28 @@ release-alias-tag: ## Add the release alias tag to the last build tag gcloud container images add-tag $(KUBEADM_CONTROL_PLANE_CONTROLLER_IMG):$(TAG) $(KUBEADM_CONTROL_PLANE_CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) gcloud container images add-tag $(CLUSTERCTL_IMG):$(TAG) $(CLUSTERCTL_IMG):$(RELEASE_ALIAS_TAG) gcloud container images add-tag $(CAPD_CONTROLLER_IMG):$(TAG) $(CAPD_CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) + gcloud container images add-tag $(CAPIM_CONTROLLER_IMG):$(TAG) $(CAPIM_CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) gcloud container images add-tag $(TEST_EXTENSION_IMG):$(TAG) $(TEST_EXTENSION_IMG):$(RELEASE_ALIAS_TAG) +.PHONY: release-notes-tool +release-notes-tool: + go build -C hack/tools -o $(ROOT_DIR)/bin/notes -tags tools sigs.k8s.io/cluster-api/hack/tools/release/notes + .PHONY: release-notes -release-notes: $(RELEASE_NOTES_DIR) $(RELEASE_NOTES) - if [ -n "${PRE_RELEASE}" ]; then \ - echo ":rotating_light: This is a RELEASE CANDIDATE. Use it only for testing purposes. If you find any bugs, file an [issue](https://github.com/kubernetes-sigs/cluster-api/issues/new)." 
> $(RELEASE_NOTES_DIR)/$(RELEASE_TAG).md; \ - else \ - go run ./hack/tools/release/notes.go --from=$(PREVIOUS_TAG) > $(RELEASE_NOTES_DIR)/$(RELEASE_TAG).md; \ - fi +release-notes: release-notes-tool + ./bin/notes --release $(RELEASE_TAG) > CHANGELOG/$(RELEASE_TAG).md + +.PHONY: test-release-notes-tool +test-release-notes-tool: + go test -C hack/tools -v -tags tools,integration sigs.k8s.io/cluster-api/hack/tools/release/notes + +.PHONY: release-provider-issues-tool +release-provider-issues-tool: # Creates GitHub issues in a pre-defined list of CAPI provider repositories + @go run ./hack/tools/release/internal/update_providers/provider_issues.go + +.PHONY: release-weekly-update-tool +release-weekly-update-tool: + go build -C hack/tools -o $(ROOT_DIR)/bin/weekly -tags tools sigs.k8s.io/cluster-api/hack/tools/release/weekly .PHONY: promote-images promote-images: $(KPROMO) @@ -978,26 +1195,23 @@ promote-images: $(KPROMO) ## Docker ## -------------------------------------- +.PHONY: docker-image-verify +docker-image-verify: ## Verifies all built images to contain the correct binary in the expected arch + ALL_ARCH="$(ALL_ARCH)" TAG="$(TAG)" ./hack/docker-image-verify.sh + .PHONY: docker-push-all docker-push-all: $(addprefix docker-push-,$(ALL_ARCH)) ## Push the docker images to be included in the release for all architectures + related multiarch manifests - $(MAKE) docker-push-manifest-core - $(MAKE) docker-push-manifest-kubeadm-bootstrap - $(MAKE) docker-push-manifest-kubeadm-control-plane - $(MAKE) docker-push-manifest-docker-infrastructure - $(MAKE) docker-push-manifest-test-extension - $(MAKE) docker-push-clusterctl + $(MAKE) ALL_ARCH="$(ALL_ARCH)" $(addprefix docker-push-manifest-,$(ALL_DOCKER_BUILD)) docker-push-%: $(MAKE) ARCH=$* docker-push .PHONY: docker-push -docker-push: ## Push the docker images to be included in the release +docker-push: $(addprefix docker-push-,$(ALL_DOCKER_BUILD)) ## Push the docker images to be included in the release + +.PHONY: docker-push-core +docker-push-core: ## Push the core docker image docker push $(CONTROLLER_IMG)-$(ARCH):$(TAG) - docker push $(KUBEADM_BOOTSTRAP_CONTROLLER_IMG)-$(ARCH):$(TAG) - docker push $(KUBEADM_CONTROL_PLANE_CONTROLLER_IMG)-$(ARCH):$(TAG) - docker push $(CLUSTERCTL_IMG)-$(ARCH):$(TAG) - docker push $(CAPD_CONTROLLER_IMG)-$(ARCH):$(TAG) - docker push $(TEST_EXTENSION_IMG)-$(ARCH):$(TAG) .PHONY: docker-push-manifest-core docker-push-manifest-core: ## Push the multiarch manifest for the core docker images @@ -1007,6 +1221,10 @@ docker-push-manifest-core: ## Push the multiarch manifest for the core docker im $(MAKE) set-manifest-image MANIFEST_IMG=$(CONTROLLER_IMG) MANIFEST_TAG=$(TAG) TARGET_RESOURCE="./config/default/manager_image_patch.yaml" $(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./config/default/manager_pull_policy.yaml" +.PHONY: docker-push-kubeadm-bootstrap +docker-push-kubeadm-bootstrap: ## Push the kubeadm bootstrap docker image + docker push $(KUBEADM_BOOTSTRAP_CONTROLLER_IMG)-$(ARCH):$(TAG) + .PHONY: docker-push-manifest-kubeadm-bootstrap docker-push-manifest-kubeadm-bootstrap: ## Push the multiarch manifest for the kubeadm bootstrap docker images docker manifest create --amend $(KUBEADM_BOOTSTRAP_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(KUBEADM_BOOTSTRAP_CONTROLLER_IMG)\-&:$(TAG)~g") @@ -1015,6 +1233,10 @@ docker-push-manifest-kubeadm-bootstrap: ## Push the multiarch manifest for the k $(MAKE) set-manifest-image MANIFEST_IMG=$(KUBEADM_BOOTSTRAP_CONTROLLER_IMG) MANIFEST_TAG=$(TAG) 
TARGET_RESOURCE="./bootstrap/kubeadm/config/default/manager_image_patch.yaml" $(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./bootstrap/kubeadm/config/default/manager_pull_policy.yaml" +.PHONY: docker-push-kubeadm-control-plane +docker-push-kubeadm-control-plane: ## Push the kubeadm control plane docker image + docker push $(KUBEADM_CONTROL_PLANE_CONTROLLER_IMG)-$(ARCH):$(TAG) + .PHONY: docker-push-manifest-kubeadm-control-plane docker-push-manifest-kubeadm-control-plane: ## Push the multiarch manifest for the kubeadm control plane docker images docker manifest create --amend $(KUBEADM_CONTROL_PLANE_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(KUBEADM_CONTROL_PLANE_CONTROLLER_IMG)\-&:$(TAG)~g") @@ -1023,6 +1245,10 @@ docker-push-manifest-kubeadm-control-plane: ## Push the multiarch manifest for t $(MAKE) set-manifest-image MANIFEST_IMG=$(KUBEADM_CONTROL_PLANE_CONTROLLER_IMG) MANIFEST_TAG=$(TAG) TARGET_RESOURCE="./controlplane/kubeadm/config/default/manager_image_patch.yaml" $(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./controlplane/kubeadm/config/default/manager_pull_policy.yaml" +.PHONY: docker-push-docker-infrastructure +docker-push-docker-infrastructure: ## Push the docker infrastructure provider image + docker push $(CAPD_CONTROLLER_IMG)-$(ARCH):$(TAG) + .PHONY: docker-push-manifest-docker-infrastructure docker-push-manifest-docker-infrastructure: ## Push the multiarch manifest for the docker infrastructure provider images docker manifest create --amend $(CAPD_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(CAPD_CONTROLLER_IMG)\-&:$(TAG)~g") @@ -1031,6 +1257,21 @@ docker-push-manifest-docker-infrastructure: ## Push the multiarch manifest for t $(MAKE) set-manifest-image MANIFEST_IMG=$(CAPD_CONTROLLER_IMG) MANIFEST_TAG=$(TAG) TARGET_RESOURCE="$(CAPD_DIR)/config/default/manager_image_patch.yaml" $(MAKE) set-manifest-pull-policy TARGET_RESOURCE="$(CAPD_DIR)/config/default/manager_pull_policy.yaml" +.PHONY: docker-push-in-memory-infrastructure +docker-push-in-memory-infrastructure: ## Push the in-memory infrastructure provider image + docker push $(CAPIM_CONTROLLER_IMG)-$(ARCH):$(TAG) + +.PHONY: docker-push-manifest-in-memory-infrastructure +docker-push-manifest-in-memory-infrastructure: ## Push the multiarch manifest for the in-memory infrastructure provider images + docker manifest create --amend $(CAPIM_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(CAPIM_CONTROLLER_IMG)\-&:$(TAG)~g") + @for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${CAPIM_CONTROLLER_IMG}:${TAG} ${CAPIM_CONTROLLER_IMG}-$${arch}:${TAG}; done + docker manifest push --purge $(CAPIM_CONTROLLER_IMG):$(TAG) + $(MAKE) set-manifest-image MANIFEST_IMG=$(CAPIM_CONTROLLER_IMG) MANIFEST_TAG=$(TAG) TARGET_RESOURCE="$(CAPIM_DIR)/config/default/manager_image_patch.yaml" + $(MAKE) set-manifest-pull-policy TARGET_RESOURCE="$(CAPIM_DIR)/config/default/manager_pull_policy.yaml" + +.PHONY: docker-push-test-extension +docker-push-test-extension: ## Push the test extension provider image + docker push $(TEST_EXTENSION_IMG)-$(ARCH):$(TAG) .PHONY: docker-push-manifest-test-extension docker-push-manifest-test-extension: ## Push the multiarch manifest for the test extension provider images @@ -1041,7 +1282,11 @@ docker-push-manifest-test-extension: ## Push the multiarch manifest for the test $(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./test/extension/config/default/manager_pull_policy.yaml" .PHONY: docker-push-clusterctl 
-docker-push-clusterctl: ## Push the clusterctl images +docker-push-clusterctl: ## Push the clusterctl image + docker push $(CLUSTERCTL_IMG)-$(ARCH):$(TAG) + +.PHONY: docker-push-manifest-clusterctl +docker-push-manifest-clusterctl: ## Push the multiarch manifest for the clusterctl images docker manifest create --amend $(CLUSTERCTL_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(CLUSTERCTL_IMG)\-&:$(TAG)~g") @for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${CLUSTERCTL_IMG}:${TAG} ${CLUSTERCTL_IMG}-$${arch}:${TAG}; done docker manifest push --purge $(CLUSTERCTL_IMG):$(TAG) @@ -1084,10 +1329,11 @@ clean-tilt: clean-charts clean-kind ## Remove all files generated by Tilt rm -rf ./controlplane/kubeadm/.tiltbuild rm -rf ./bootstrap/kubeadm/.tiltbuild rm -rf ./test/infrastructure/docker/.tiltbuild + rm -rf ./test/infrastructure/inmemory/.tiltbuild .PHONY: clean-charts clean-charts: ## Remove all local copies of Helm charts in ./hack/observability - (for path in "./hack/observability/*"; do rm -rf $$path/charts ; done) + (for path in "./hack/observability/*"; do rm -rf $$path/.charts ; done) .PHONY: clean-book clean-book: ## Remove all generated GitBook files @@ -1140,6 +1386,9 @@ $(OPENAPI_GEN_BIN): $(OPENAPI_GEN) ## Build a local copy of openapi-gen. .PHONY: $(RUNTIME_OPENAPI_GEN_BIN) $(RUNTIME_OPENAPI_GEN_BIN): $(RUNTIME_OPENAPI_GEN) ## Build a local copy of runtime-openapi-gen. +.PHONY: $(PROWJOB_GEN_BIN) +$(PROWJOB_GEN_BIN): $(PROWJOB_GEN) ## Build a local copy of prowjob-gen. + .PHONY: $(CONVERSION_VERIFIER_BIN) $(CONVERSION_VERIFIER_BIN): $(CONVERSION_VERIFIER) ## Build a local copy of conversion-verifier. @@ -1167,11 +1416,17 @@ $(YQ_BIN): $(YQ) ## Build a local copy of yq .PHONY: $(TILT_PREPARE_BIN) $(TILT_PREPARE_BIN): $(TILT_PREPARE) ## Build a local copy of tilt-prepare. +.PHONY: $(GINKGO_BIN) +$(GINKGO_BIN): $(GINKGO) ## Build a local copy of ginkgo. + .PHONY: $(GOLANGCI_LINT_BIN) -$(GOLANGCI_LINT_BIN): $(GOLANGCI_LINT) ## Build a local copy of golangci-lint +$(GOLANGCI_LINT_BIN): $(GOLANGCI_LINT) ## Build a local copy of golangci-lint. -.PHONY: $(GINKGO_BIN) -$(GINKGO_BIN): $(GINKGO) ## Build a local copy of ginkgo +.PHONY: $(GOVULNCHECK_BIN) +$(GOVULNCHECK_BIN): $(GOVULNCHECK) ## Build a local copy of govulncheck. + +.PHONY: $(IMPORT_BOSS_BIN) +$(IMPORT_BOSS_BIN): $(IMPORT_BOSS) $(CONTROLLER_GEN): # Build controller-gen from tools folder. GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(CONTROLLER_GEN_PKG) $(CONTROLLER_GEN_BIN) $(CONTROLLER_GEN_VER) @@ -1194,6 +1449,10 @@ $(OPENAPI_GEN): # Build openapi-gen from tools folder. $(RUNTIME_OPENAPI_GEN): $(TOOLS_DIR)/go.mod # Build openapi-gen from tools folder. cd $(TOOLS_DIR); go build -tags=tools -o $(BIN_DIR)/$(RUNTIME_OPENAPI_GEN_BIN) sigs.k8s.io/cluster-api/hack/tools/runtime-openapi-gen +.PHONY: $(PROWJOB_GEN) +$(PROWJOB_GEN): $(TOOLS_DIR)/go.mod # Build prowjob-gen from tools folder. + cd $(TOOLS_DIR); go build -tags=tools -o $(BIN_DIR)/$(PROWJOB_GEN_BIN) sigs.k8s.io/cluster-api/hack/tools/prowjob-gen + $(GOTESTSUM): # Build gotestsum from tools folder. GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(GOTESTSUM_PKG) $(GOTESTSUM_BIN) $(GOTESTSUM_VER) @@ -1210,7 +1469,7 @@ $(SETUP_ENVTEST): # Build setup-envtest from tools folder. GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(SETUP_ENVTEST_PKG) $(SETUP_ENVTEST_BIN) $(SETUP_ENVTEST_VER) $(TILT_PREPARE): $(TOOLS_DIR)/go.mod # Build tilt-prepare from tools folder. 
- cd $(TOOLS_DIR); go build -tags=tools -o $(BIN_DIR)/tilt-prepare sigs.k8s.io/cluster-api/hack/tools/tilt-prepare + cd $(TOOLS_DIR); go build -tags=tools -o $(BIN_DIR)/tilt-prepare sigs.k8s.io/cluster-api/hack/tools/internal/tilt-prepare $(KPROMO): GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(KPROMO_PKG) $(KPROMO_BIN) ${KPROMO_VER} @@ -1218,13 +1477,71 @@ $(KPROMO): $(YQ): GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(YQ_PKG) $(YQ_BIN) ${YQ_VER} -$(GOLANGCI_LINT): .github/workflows/golangci-lint.yml # Download golangci-lint using hack script into tools folder. - hack/ensure-golangci-lint.sh \ - -b $(TOOLS_BIN_DIR) \ - $(shell cat .github/workflows/golangci-lint.yml | grep [[:space:]]version: | sed 's/.*version: //') - $(GINKGO): # Build ginkgo from tools folder. - GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(GINKGO_PKG) $(GINKGO_BIN) $(GINGKO_VER) + GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(GINKGO_PKG) $(GINKGO_BIN) $(GINKGO_VER) + +$(GOLANGCI_LINT): # Build golangci-lint from tools folder. + GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(GOLANGCI_LINT_PKG) $(GOLANGCI_LINT_BIN) $(GOLANGCI_LINT_VER) + +$(GOVULNCHECK): # Build govulncheck. + GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(GOVULNCHECK_PKG) $(GOVULNCHECK_BIN) $(GOVULNCHECK_VER) + +$(IMPORT_BOSS): # Build import-boss + GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(IMPORT_BOSS_PKG) $(IMPORT_BOSS_BIN) $(IMPORT_BOSS_VER) + +## -------------------------------------- +## triage-party +## -------------------------------------- + +.PHONY: release-triage-party +release-triage-party: docker-build-triage-party docker-push-triage-party clean-triage-party + +.PHONY: release-triage-party-local +release-triage-party-local: docker-build-triage-party clean-triage-party ## Release the triage party image for local use only + +.PHONY: checkout-triage-party +checkout-triage-party: + @if [ -z "${TRIAGE_PARTY_VERSION}" ]; then echo "TRIAGE_PARTY_VERSION is not set"; exit 1; fi + @if [ -d "$(TRIAGE_PARTY_TMP_DIR)" ]; then \ + echo "$(TRIAGE_PARTY_TMP_DIR) exists, skipping clone"; \ + else \ + git clone "https://github.com/google/triage-party.git" "$(TRIAGE_PARTY_TMP_DIR)"; \ + cd "$(TRIAGE_PARTY_TMP_DIR)"; \ + git checkout "$(TRIAGE_PARTY_VERSION)"; \ + git apply "$(ROOT_DIR)/$(TRIAGE_PARTY_DIR)/triage-improvements.patch"; \ + fi + @cd "$(ROOT_DIR)/$(TRIAGE_PARTY_TMP_DIR)"; \ + if [ "$$(git describe --tag 2> /dev/null)" != "$(TRIAGE_PARTY_VERSION)" ]; then \ + echo "ERROR: checked out version $$(git describe --tag 2> /dev/null) does not match expected version $(TRIAGE_PARTY_VERSION)"; \ + exit 1; \ + fi + +.PHONY: docker-build-triage-party +docker-build-triage-party: checkout-triage-party + @if [ -z "${TRIAGE_PARTY_VERSION}" ]; then echo "TRIAGE_PARTY_VERSION is not set"; exit 1; fi + cd $(TRIAGE_PARTY_TMP_DIR) && \ + docker buildx build --platform linux/amd64 -t $(TRIAGE_PARTY_CONTROLLER_IMG):$(TRIAGE_PARTY_VERSION) . 
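+# Example invocation (the version value is illustrative): TRIAGE_PARTY_VERSION=v1.6.0 make release-triage-party-local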
+ +.PHONY: docker-push-triage-party +docker-push-triage-party: + @if [ -z "${TRIAGE_PARTY_VERSION}" ]; then echo "TRIAGE_PARTY_VERSION is not set"; exit 1; fi + docker push $(TRIAGE_PARTY_CONTROLLER_IMG):$(TRIAGE_PARTY_VERSION) + +.PHONY: clean-triage-party +clean-triage-party: + rm -fr "$(TRIAGE_PARTY_TMP_DIR)" + +.PHONY: triage-party +triage-party: ## Start a local instance of triage party + @if [ -z "${GITHUB_TOKEN}" ]; then echo "GITHUB_TOKEN is not set"; exit 1; fi + docker run --platform linux/amd64 --rm \ + -e GITHUB_TOKEN \ + -e "PERSIST_BACKEND=disk" \ + -e "PERSIST_PATH=/app/.cache" \ + -v "$(ROOT_DIR)/$(TRIAGE_PARTY_DIR)/.cache:/app/.cache" \ + -v "$(ROOT_DIR)/$(TRIAGE_PARTY_DIR)/config.yaml:/app/config/config.yaml" \ + -p 8080:8080 \ + $(TRIAGE_PARTY_CONTROLLER_IMG):$(TRIAGE_PARTY_VERSION) ## -------------------------------------- ## Helpers diff --git a/OWNERS b/OWNERS index a09d4c4da582..538895723556 100644 --- a/OWNERS +++ b/OWNERS @@ -11,9 +11,11 @@ reviewers: - cluster-api-reviewers emeritus_approvers: + - CecileRobertMichon - chuckha - detiber - kris-nova - ncdc - roberthbailey - davidewatson + - ykakarap diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 547c80d2bd37..d883b9493f35 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -14,12 +14,12 @@ aliases: # active folks who can be contacted to perform admin-related # tasks on the repo, or otherwise approve any PRS. cluster-api-admins: - - CecileRobertMichon + - fabriziopandini - vincepri # non-admin folks who have write-access and can approve any PRs in the repo cluster-api-maintainers: - - CecileRobertMichon + - chrischdi - enxebre - fabriziopandini - killianmuldoon @@ -32,7 +32,6 @@ aliases: - JoelSpeed - richardcase - stmcginnis - - ykakarap # ----------------------------------------------------------- # OWNER_ALIASES for controllers/topology @@ -40,7 +39,6 @@ aliases: cluster-api-topology-maintainers: cluster-api-topology-reviewers: - - chrischdi # ----------------------------------------------------------- # OWNER_ALIASES for bootstrap/kubeadm @@ -73,7 +71,6 @@ aliases: # ----------------------------------------------------------- cluster-api-clusterctl-maintainers: - - ykakarap cluster-api-clusterctl-reviewers: - Jont828 @@ -83,7 +80,6 @@ aliases: cluster-api-test-maintainers: cluster-api-test-reviewers: - - chrischdi - elmiko # ----------------------------------------------------------- @@ -92,7 +88,6 @@ aliases: cluster-api-test-framework-maintainers: cluster-api-test-framework-reviewers: - - chrischdi - elmiko # ----------------------------------------------------------- @@ -101,9 +96,15 @@ aliases: cluster-api-provider-docker-maintainers: cluster-api-provider-docker-reviewers: - - chrischdi - elmiko + # ----------------------------------------------------------- + # OWNER_ALIASES for test/infrastructure/inmemory + # ----------------------------------------------------------- + + cluster-api-provider-inmemory-maintainers: + cluster-api-provider-inmemory-reviewers: + # ----------------------------------------------------------- # OWNER_ALIASES for docs # ----------------------------------------------------------- @@ -112,3 +113,31 @@ aliases: - oscr cluster-api-docs-reviewers: - elmiko + + # ----------------------------------------------------------- + # OWNER_ALIASES for v1.6 release-team + # ----------------------------------------------------------- + + cluster-api-release-lead: + - adilGhaffarDev + + cluster-api-release-team: + # members added in commented lines have a pending membership + # and will be 
added back once it is acquired. + - adilGhaffarDev # release lead + # - chandankumar4 # release comms lead + - chiukapoor + # - dhij + - hackeramitkumar + - jayesh-srivastava + # - kperath + # - meatballhat + # - Nivedita-coder + - pravarag + # - rajankumary2k + # - shipra101 + # - smoshiur1237 + # - Sunnatillo # release ci lead + - troy0820 + - vishalanarase + - willie-yao diff --git a/PROJECT b/PROJECT deleted file mode 100644 index 6cff7382a4b6..000000000000 --- a/PROJECT +++ /dev/null @@ -1,49 +0,0 @@ -domain: x-k8s.io -repo: sigs.k8s.io/cluster-api -resources: -# v1alpha3 types -- group: cluster - kind: Cluster - version: v1alpha3 -- group: cluster - kind: Machine - version: v1alpha3 -- group: cluster - kind: MachineSet - version: v1alpha3 -- group: cluster - kind: MachineDeployment - version: v1alpha3 -# v1alpha4 types -- group: cluster - kind: ClusterClass - version: v1alpha4 -- group: cluster - kind: Cluster - version: v1alpha4 -- group: cluster - kind: Machine - version: v1alpha4 -- group: cluster - kind: MachineSet - version: v1alpha4 -- group: cluster - kind: MachineDeployment - version: v1alpha4 -# v1beta1 types -- group: cluster - kind: ClusterClass - version: v1beta1 -- group: cluster - kind: Cluster - version: v1beta1 -- group: cluster - kind: Machine - version: v1beta1 -- group: cluster - kind: MachineSet - version: v1beta1 -- group: cluster - kind: MachineDeployment - version: v1beta1 -version: "2" diff --git a/README.md b/README.md index 51d34f68bd82..b4a3b9d5d175 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,8 @@ + +GitHub release (latest SemVer)

# Cluster API @@ -11,9 +13,7 @@ ### 👋 Welcome to our project! Our [Book](https://cluster-api.sigs.k8s.io) can help you get started and provides lots of in-depth information. #### Useful links -- [Scope, objectives, goals and requirements](./docs/scope-and-objectives.md) - [Feature proposals](./docs/proposals) -- [Reference use cases](./docs/staging-use-cases.md) - [Quick Start](https://cluster-api.sigs.k8s.io/user/quick-start.html) ## ✨ What is the Cluster API? diff --git a/REVIEWING.md b/REVIEWING.md index d3248bd439d3..d0897dce90a8 100644 --- a/REVIEWING.md +++ b/REVIEWING.md @@ -106,7 +106,7 @@ By extension, the Cluster API contract includes all the util methods that Cluste making the development of providers simpler and consistent (e.g. everything under `/util` or in `/test/framework`); documentation of the utility is available [here](https://pkg.go.dev/sigs.k8s.io/cluster-api?tab=subdirectories). -The Cluster API contract is linked to the version of the API (e.g. v1alpha3 Contract), and it is expected to +The Cluster API contract is linked to the version of the API (e.g. v1beta1 Contract), and it is expected to provide the same set of guarantees in terms of support window, stability, and upgradability. This makes any change that can impact the Cluster API contract critical and usually: diff --git a/Tiltfile b/Tiltfile index 306b42cc6da4..ee97b3d15b90 100644 --- a/Tiltfile +++ b/Tiltfile @@ -3,18 +3,10 @@ envsubst_cmd = "./hack/tools/bin/envsubst" clusterctl_cmd = "./bin/clusterctl" kubectl_cmd = "kubectl" -default_build_engine = "docker" -kubernetes_version = "v1.26.0" - -if str(local("command -v " + kubectl_cmd + " || true", quiet = True)) == "": - fail("Required command '" + kubectl_cmd + "' not found in PATH") +kubernetes_version = "v1.30.0" load("ext://uibutton", "cmd_button", "location", "text_input") -# detect if docker images should be built using podman -if "Podman Engine" in str(local("docker version || podman version", quiet = True)): - default_build_engine = "podman" - # set defaults version_settings(True, ">=0.30.8") @@ -22,7 +14,7 @@ settings = { "enable_providers": ["docker"], "kind_cluster_name": os.getenv("CAPI_KIND_CLUSTER_NAME", "capi-test"), "debug": {}, - "build_engine": default_build_engine, + "build_engine": "docker", } # global settings @@ -36,13 +28,29 @@ os.putenv("CAPI_KIND_CLUSTER_NAME", settings.get("kind_cluster_name")) allow_k8s_contexts(settings.get("allowed_contexts")) +if str(local("command -v " + kubectl_cmd + " || true", quiet = True)) == "": + fail("Required command '" + kubectl_cmd + "' not found in PATH") + +# detect if docker images should be built using podman +if "Podman Engine" in str(local("docker version || podman version", quiet = True)): + settings["build_engine"] = "podman" + os_name = str(local("go env GOOS")).rstrip("\n") os_arch = str(local("go env GOARCH")).rstrip("\n") if settings.get("trigger_mode") == "manual": trigger_mode(TRIGGER_MODE_MANUAL) -if settings.get("default_registry") != "": +usingLocalRegistry = str(local(kubectl_cmd + " get cm -n kube-public local-registry-hosting || true", quiet = True)) +if not usingLocalRegistry: + if settings.get("default_registry", "") == "": + fail("default_registry is required when not using a local registry, please add it to your tilt-settings.yaml/json") + + protectedRegistries = ["gcr.io/k8s-staging-cluster-api"] + if settings.get("default_registry") in protectedRegistries: + fail("current default_registry '{}' is protected, tilt cannot push images to it. 
Please select another default_registry in your tilt-settings.yaml/json".format(settings.get("default_registry"))) + +if settings.get("default_registry", "") != "": default_registry(settings.get("default_registry")) always_enable_providers = ["core"] @@ -103,15 +111,26 @@ providers = { "../../go.sum", "../container", "api", - "cloudinit", "controllers", "docker", "exp", "internal", - "third_party", ], "label": "CAPD", }, + "in-memory": { + "context": "test/infrastructure/inmemory", # NOTE: this should be kept in sync with corresponding setting in tilt-prepare + "image": "gcr.io/k8s-staging-cluster-api/capim-manager", + "live_reload_deps": [ + "main.go", + "../../go.mod", + "../../go.sum", + "api", + "controllers", + "internal", + ], + "label": "CAPIM", + }, "test-extension": { "context": "test/extension", # NOTE: this should be kept in sync with corresponding setting in tilt-prepare "image": "gcr.io/k8s-staging-cluster-api/test-extension", @@ -165,9 +184,10 @@ def load_provider_tiltfiles(): tilt_helper_dockerfile_header = """ # Tilt image -FROM golang:1.19.6 as tilt-helper +FROM golang:1.22.3 as tilt-helper +# Install delve. Note this should be kept in step with the Go release minor version. +RUN go install github.com/go-delve/delve/cmd/dlv@v1.22 # Support live reloading with Tilt -RUN go install github.com/go-delve/delve/cmd/dlv@latest RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/tilt-dev/rerun-process-wrapper/master/restart.sh && \ wget --output-document /start.sh --quiet https://raw.githubusercontent.com/tilt-dev/rerun-process-wrapper/master/start.sh && \ chmod +x /start.sh && chmod +x /restart.sh && chmod +x /go/bin/dlv && \ @@ -175,7 +195,7 @@ RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com """ tilt_dockerfile_header = """ -FROM gcr.io/distroless/base:debug as tilt +FROM golang:1.22.3 as tilt WORKDIR / COPY --from=tilt-helper /process.txt . COPY --from=tilt-helper /start.sh . 
@@ -228,9 +248,15 @@ def build_go_binary(context, reload_deps, debug, go_main, binary_name, label):
     live_reload_deps = []
     for d in reload_deps:
         live_reload_deps.append(context + "/" + d)
+
+    # Ensure the {context}/.tiltbuild/bin directory exists before any other resources run.
+    # `local` is evaluated immediately; other resources are executed later, at startup or when triggered.
+    local("mkdir -p {context}/.tiltbuild/bin".format(context = shlex.quote(context)), quiet = True)
+
+    # Build the Go binary
     local_resource(
         label.lower() + "_binary",
-        cmd = "cd {context};mkdir -p .tiltbuild/bin;{build_cmd}".format(
+        cmd = "cd {context};{build_cmd}".format(
             context = context,
             build_cmd = build_cmd,
         ),
@@ -316,23 +342,24 @@ def enable_provider(name, debug):

     port_forwards, links = get_port_forwards(debug)

-    build_go_binary(
-        context = p.get("context"),
-        reload_deps = p.get("live_reload_deps"),
-        debug = debug,
-        go_main = p.get("go_main", "main.go"),
-        binary_name = "manager",
-        label = label,
-    )
+    if p.get("image"):
+        build_go_binary(
+            context = p.get("context"),
+            reload_deps = p.get("live_reload_deps"),
+            debug = debug,
+            go_main = p.get("go_main", "main.go"),
+            binary_name = "manager",
+            label = label,
+        )

-    build_docker_image(
-        image = p.get("image"),
-        context = p.get("context"),
-        binary_name = "manager",
-        additional_docker_helper_commands = p.get("additional_docker_helper_commands", ""),
-        additional_docker_build_commands = p.get("additional_docker_build_commands", ""),
-        port_forwards = port_forwards,
-    )
+        build_docker_image(
+            image = p.get("image"),
+            context = p.get("context"),
+            binary_name = "manager",
+            additional_docker_helper_commands = p.get("additional_docker_helper_commands", ""),
+            additional_docker_build_commands = p.get("additional_docker_build_commands", ""),
+            port_forwards = port_forwards,
+        )

     additional_objs = []
     p_resources = p.get("additional_resources", [])
@@ -340,9 +367,9 @@
         k8s_yaml(p.get("context") + "/" + resource)
         additional_objs = additional_objs + decode_yaml_stream(read_file(p.get("context") + "/" + resource))

-    if p.get("kustomize_config", True):
+    if p.get("apply_provider_yaml", True):
         yaml = read_file("./.tiltbuild/yaml/{}.provider.yaml".format(name))
-        k8s_yaml(yaml)
+        k8s_yaml(yaml, allow_duplicates = True)
         objs = decode_yaml_stream(yaml)
         k8s_resource(
             workload = find_object_name(objs, "Deployment"),
@@ -356,7 +383,8 @@

 def find_object_name(objs, kind):
     for o in objs:
-        if o["kind"] == kind:
+        # Ignore objects that are not part of the provider, e.g. the ASO Deployment in CAPZ.
+ if o["kind"] == kind and "cluster.x-k8s.io/provider" in o["metadata"]["labels"]: return o["metadata"]["name"] return "" @@ -403,15 +431,15 @@ def deploy_provider_crds(): def deploy_observability(): if "promtail" in settings.get("deploy_observability", []): k8s_yaml(read_file("./.tiltbuild/yaml/promtail.observability.yaml"), allow_duplicates = True) - k8s_resource(workload = "promtail", extra_pod_selectors = [{"app": "promtail"}], labels = ["observability"], resource_deps = ["loki"]) + k8s_resource(workload = "promtail", extra_pod_selectors = [{"app": "promtail"}], labels = ["observability"], resource_deps = ["loki"], objects = ["promtail:serviceaccount"]) if "loki" in settings.get("deploy_observability", []): k8s_yaml(read_file("./.tiltbuild/yaml/loki.observability.yaml"), allow_duplicates = True) - k8s_resource(workload = "loki", port_forwards = "3100", extra_pod_selectors = [{"app": "loki"}], labels = ["observability"]) + k8s_resource(workload = "loki", port_forwards = "3100", extra_pod_selectors = [{"app": "loki"}], labels = ["observability"], objects = ["loki:serviceaccount"]) cmd_button( "loki:import logs", - argv = ["sh", "-c", "cd ./hack/tools/log-push && go run ./main.go --log-path=$LOG_PATH"], + argv = ["sh", "-c", "cd ./hack/tools/internal/log-push && go run ./main.go --log-path=$LOG_PATH"], resource = "loki", icon_name = "import_export", text = "Import logs", @@ -420,17 +448,31 @@ def deploy_observability(): ], ) + if "tempo" in settings.get("deploy_observability", []): + k8s_yaml(read_file("./.tiltbuild/yaml/tempo.observability.yaml"), allow_duplicates = True) + + # Port-forward the tracing port to localhost, so we can also send traces from local. + k8s_resource(workload = "tempo", port_forwards = "4317:4317", extra_pod_selectors = [{"app": "tempo"}], labels = ["observability"]) + if "grafana" in settings.get("deploy_observability", []): k8s_yaml(read_file("./.tiltbuild/yaml/grafana.observability.yaml"), allow_duplicates = True) k8s_resource(workload = "grafana", port_forwards = "3001:3000", extra_pod_selectors = [{"app": "grafana"}], labels = ["observability"], objects = ["grafana:serviceaccount"]) if "prometheus" in settings.get("deploy_observability", []): k8s_yaml(read_file("./.tiltbuild/yaml/prometheus.observability.yaml"), allow_duplicates = True) - k8s_resource(workload = "prometheus-server", new_name = "prometheus", port_forwards = "9090", extra_pod_selectors = [{"app": "prometheus"}], labels = ["observability"]) + k8s_resource(workload = "prometheus-server", new_name = "prometheus", port_forwards = "9090", extra_pod_selectors = [{"app": "prometheus"}], labels = ["observability"], objects = ["prometheus-server:serviceaccount"]) if "kube-state-metrics" in settings.get("deploy_observability", []): k8s_yaml(read_file("./.tiltbuild/yaml/kube-state-metrics.observability.yaml"), allow_duplicates = True) - k8s_resource(workload = "kube-state-metrics", new_name = "kube-state-metrics", extra_pod_selectors = [{"app": "kube-state-metrics"}], labels = ["observability"]) + k8s_resource(workload = "kube-state-metrics", new_name = "kube-state-metrics", extra_pod_selectors = [{"app": "kube-state-metrics"}], labels = ["observability"], objects = ["kube-state-metrics:serviceaccount"]) + + if "parca" in settings.get("deploy_observability", []): + k8s_yaml(read_file("./.tiltbuild/yaml/parca.observability.yaml"), allow_duplicates = True) + k8s_resource(workload = "parca", new_name = "parca", port_forwards = "7070", extra_pod_selectors = [{"app": "parca"}], labels = ["observability"], 
objects = ["parca:serviceaccount"]) + + if "metrics-server" in settings.get("deploy_observability", []): + k8s_yaml(read_file("./.tiltbuild/yaml/metrics-server.observability.yaml"), allow_duplicates = True) + k8s_resource(workload = "metrics-server", new_name = "metrics-server", extra_pod_selectors = [{"app": "metrics-server"}], labels = ["observability"], objects = ["metrics-server:serviceaccount"]) if "visualizer" in settings.get("deploy_observability", []): k8s_yaml(read_file("./.tiltbuild/yaml/visualizer.observability.yaml"), allow_duplicates = True) @@ -439,6 +481,20 @@ def deploy_observability(): new_name = "visualizer", port_forwards = [port_forward(local_port = 8000, container_port = 8081, name = "View visualization")], labels = ["observability"], + objects = ["capi-visualizer:serviceaccount"], + ) + +def deploy_additional_kustomizations(): + for name in settings.get("additional_kustomizations", []): + yaml = read_file("./.tiltbuild/yaml/{}.kustomization.yaml".format(name)) + k8s_yaml(yaml) + objs = decode_yaml_stream(yaml) + print("objects") + print(find_all_objects_names(objs)) + k8s_resource( + new_name = name, + objects = find_all_objects_names(objs), + labels = ["kustomization"], ) def prepare_all(): @@ -459,10 +515,11 @@ def cluster_templates(): substitutions["NAMESPACE"] = substitutions.get("NAMESPACE", "default") substitutions["KUBERNETES_VERSION"] = substitutions.get("KUBERNETES_VERSION", kubernetes_version) substitutions["CONTROL_PLANE_MACHINE_COUNT"] = substitutions.get("CONTROL_PLANE_MACHINE_COUNT", "1") - substitutions["WORKER_MACHINE_COUNT"] = substitutions.get("WORKER_MACHINE_COUNT", "3") + substitutions["WORKER_MACHINE_COUNT"] = substitutions.get("WORKER_MACHINE_COUNT", "1") template_dirs = settings.get("template_dirs", { "docker": ["./test/infrastructure/docker/templates"], + "in-memory": ["./test/infrastructure/inmemory/templates"], }) for provider, provider_dirs in template_dirs.items(): @@ -485,11 +542,14 @@ def deploy_templates(filename, label, substitutions): basename = os.path.basename(filename) if basename.endswith(".yaml"): if basename.startswith("clusterclass-"): - template_name = basename.replace("clusterclass-", "").replace(".yaml", "") - deploy_clusterclass(template_name, label, filename, substitutions) + clusterclass_name = basename.replace("clusterclass-", "").replace(".yaml", "") + deploy_clusterclass(clusterclass_name, label, filename, substitutions) elif basename.startswith("cluster-template-"): - clusterclass_name = basename.replace("cluster-template-", "").replace(".yaml", "") - deploy_cluster_template(clusterclass_name, label, filename, substitutions) + template_name = basename.replace("cluster-template-", "").replace(".yaml", "") + deploy_cluster_template(template_name, label, filename, substitutions) + elif basename == "cluster-template.yaml": + template_name = "default" + deploy_cluster_template(template_name, label, filename, substitutions) def deploy_clusterclass(clusterclass_name, label, filename, substitutions): apply_clusterclass_cmd = "cat " + filename + " | " + envsubst_cmd + " | " + kubectl_cmd + " apply --namespace=$NAMESPACE -f - && echo \"ClusterClass created from\'" + filename + "\', don't forget to delete\n\"" @@ -598,6 +658,8 @@ deploy_provider_crds() deploy_observability() +deploy_additional_kustomizations() + enable_providers() cluster_templates() diff --git a/api/.import-restrictions b/api/.import-restrictions new file mode 100644 index 000000000000..f6f10b3ff544 --- /dev/null +++ b/api/.import-restrictions @@ -0,0 +1,5 @@ 
+rules: + - selectorRegexp: sigs[.]k8s[.]io/controller-runtime + allowedPrefixes: + - "sigs.k8s.io/controller-runtime/pkg/conversion" + forbiddenPrefixes: [] diff --git a/api/v1alpha3/webhook_test.go b/api/v1alpha3/webhook_test.go deleted file mode 100644 index 11b229c6c925..000000000000 --- a/api/v1alpha3/webhook_test.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha3 - -import ( - "fmt" - "testing" - - . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" - "sigs.k8s.io/controller-runtime/pkg/client" - - "sigs.k8s.io/cluster-api/util" -) - -func TestClusterConversion(t *testing.T) { - g := NewWithT(t) - ns, err := env.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5))) - g.Expect(err).ToNot(HaveOccurred()) - clusterName := fmt.Sprintf("test-cluster-%s", util.RandomString(5)) - cluster := &Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: clusterName, - Namespace: ns.Name, - }, - } - - g.Expect(env.Create(ctx, cluster)).To(Succeed()) - defer func(do ...client.Object) { - g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) - }(ns, cluster) -} - -func TestMachineSetConversion(t *testing.T) { - g := NewWithT(t) - ns, err := env.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5))) - g.Expect(err).ToNot(HaveOccurred()) - - clusterName := fmt.Sprintf("test-cluster-%s", util.RandomString(5)) - machineSetName := fmt.Sprintf("test-machineset-%s", util.RandomString(5)) - machineSet := &MachineSet{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns.Name, - Name: machineSetName, - }, - Spec: MachineSetSpec{ - ClusterName: clusterName, - Template: newFakeMachineTemplate(ns.Name, clusterName), - MinReadySeconds: 10, - Replicas: pointer.Int32(1), - DeletePolicy: "Random", - }, - } - - g.Expect(env.Create(ctx, machineSet)).To(Succeed()) - defer func(do ...client.Object) { - g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) - }(ns, machineSet) -} - -func TestMachineDeploymentConversion(t *testing.T) { - g := NewWithT(t) - ns, err := env.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5))) - g.Expect(err).ToNot(HaveOccurred()) - - clusterName := fmt.Sprintf("test-cluster-%s", util.RandomString(5)) - machineDeploymentName := fmt.Sprintf("test-machinedeployment-%s", util.RandomString(5)) - machineDeployment := &MachineDeployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: machineDeploymentName, - Namespace: ns.Name, - }, - Spec: MachineDeploymentSpec{ - ClusterName: clusterName, - Template: newFakeMachineTemplate(ns.Name, clusterName), - Replicas: pointer.Int32(0), - }, - } - - g.Expect(env.Create(ctx, machineDeployment)).To(Succeed()) - defer func(do ...client.Object) { - g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) - }(ns, machineDeployment) -} - -func newFakeMachineTemplate(namespace, clusterName string) MachineTemplateSpec { - return MachineTemplateSpec{ - Spec: MachineSpec{ - 
ClusterName: clusterName, - Bootstrap: Bootstrap{ - ConfigRef: &corev1.ObjectReference{ - APIVersion: "bootstrap.cluster.x-k8s.io/v1alpha3", - Kind: "KubeadmConfigTemplate", - Name: fmt.Sprintf("%s-md-0", clusterName), - Namespace: namespace, - }, - }, - InfrastructureRef: corev1.ObjectReference{ - APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha3", - Kind: "FakeMachineTemplate", - Name: fmt.Sprintf("%s-md-0", clusterName), - Namespace: namespace, - }, - Version: pointer.String("v1.20.2"), - }, - } -} diff --git a/api/v1beta1/.import-restrictions b/api/v1beta1/.import-restrictions new file mode 100644 index 000000000000..a2e1dfd08133 --- /dev/null +++ b/api/v1beta1/.import-restrictions @@ -0,0 +1,5 @@ +rules: + - selectorRegexp: sigs[.]k8s[.]io/controller-runtime + allowedPrefixes: [] + forbiddenPrefixes: + - "sigs.k8s.io/controller-runtime" diff --git a/api/v1beta1/cluster_types.go b/api/v1beta1/cluster_types.go index 9e3095837359..68818eb36e9f 100644 --- a/api/v1beta1/cluster_types.go +++ b/api/v1beta1/cluster_types.go @@ -25,7 +25,7 @@ import ( corev1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" capierrors "sigs.k8s.io/cluster-api/errors" ) @@ -34,6 +34,9 @@ const ( // ClusterFinalizer is the finalizer used by the cluster controller to // cleanup the cluster resources when a Cluster is being deleted. ClusterFinalizer = "cluster.cluster.x-k8s.io" + + // ClusterKind represents the Kind of Cluster. + ClusterKind = "Cluster" ) // ANCHOR: ClusterSpec @@ -80,6 +83,9 @@ type Topology struct { // RolloutAfter performs a rollout of the entire cluster one component at a time, // control plane first and then machine deployments. + // + // Deprecated: This field has no function and is going to be removed in the next apiVersion. + // // +optional RolloutAfter *metav1.Time `json:"rolloutAfter,omitempty"` @@ -143,6 +149,10 @@ type WorkersTopology struct { // MachineDeployments is a list of machine deployments in the cluster. // +optional MachineDeployments []MachineDeploymentTopology `json:"machineDeployments,omitempty"` + + // MachinePools is a list of machine pools in the cluster. + // +optional + MachinePools []MachinePoolTopology `json:"machinePools,omitempty"` } // MachineDeploymentTopology specifies the different parameters for a set of worker nodes in the topology. @@ -234,6 +244,66 @@ type MachineHealthCheckTopology struct { MachineHealthCheckClass `json:",inline"` } +// MachinePoolTopology specifies the different parameters for a pool of worker nodes in the topology. +// This pool of nodes is managed by a MachinePool object whose lifecycle is managed by the Cluster controller. +type MachinePoolTopology struct { + // Metadata is the metadata applied to the MachinePool. + // At runtime this metadata is merged with the corresponding metadata from the ClusterClass. + // +optional + Metadata ObjectMeta `json:"metadata,omitempty"` + + // Class is the name of the MachinePoolClass used to create the pool of worker nodes. + // This should match one of the deployment classes defined in the ClusterClass object + // mentioned in the `Cluster.Spec.Class` field. + Class string `json:"class"` + + // Name is the unique identifier for this MachinePoolTopology. + // The value is used with other unique identifiers to create a MachinePool's Name + // (e.g. cluster's name, etc). 
In case the name is greater than the allowed maximum length, + // the values are hashed together. + Name string `json:"name"` + + // FailureDomains is the list of failure domains the machine pool will be created in. + // Must match a key in the FailureDomains map stored on the cluster object. + // +optional + FailureDomains []string `json:"failureDomains,omitempty"` + + // NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + // The default value is 0, meaning that the node can be drained without any time limitations. + // NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` + // +optional + NodeDrainTimeout *metav1.Duration `json:"nodeDrainTimeout,omitempty"` + + // NodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + // to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. + // +optional + NodeVolumeDetachTimeout *metav1.Duration `json:"nodeVolumeDetachTimeout,omitempty"` + + // NodeDeletionTimeout defines how long the controller will attempt to delete the Node that the MachinePool + // hosts after the MachinePool is marked for deletion. A duration of 0 will retry deletion indefinitely. + // Defaults to 10 seconds. + // +optional + NodeDeletionTimeout *metav1.Duration `json:"nodeDeletionTimeout,omitempty"` + + // Minimum number of seconds for which a newly created machine pool should + // be ready. + // Defaults to 0 (machine will be considered available as soon as it + // is ready) + // +optional + MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` + + // Replicas is the number of nodes belonging to this pool. + // If the value is nil, the MachinePool is created without the number of Replicas (defaulting to 1) + // and it's assumed that an external entity (like cluster autoscaler) is responsible for the management + // of this value. + // +optional + Replicas *int32 `json:"replicas,omitempty"` + + // Variables can be used to customize the MachinePool through patches. + // +optional + Variables *MachinePoolVariables `json:"variables,omitempty"` +} + // ClusterVariable can be used to customize the Cluster through patches. Each ClusterVariable is associated with a // Variable definition in the ClusterClass `status` variables. type ClusterVariable struct { @@ -264,6 +334,13 @@ type MachineDeploymentVariables struct { Overrides []ClusterVariable `json:"overrides,omitempty"` } +// MachinePoolVariables can be used to provide variables for a specific MachinePool. +type MachinePoolVariables struct { + // Overrides can be used to override Cluster level variables. + // +optional + Overrides []ClusterVariable `json:"overrides,omitempty"` +} + // ANCHOR_END: ClusterSpec // ANCHOR: ClusterNetwork @@ -335,7 +412,11 @@ type ClusterStatus struct { // +optional InfrastructureReady bool `json:"infrastructureReady"` - // ControlPlaneReady defines if the control plane is ready. + // ControlPlaneReady denotes if the control plane became ready during initial provisioning + // to receive requests. + // NOTE: this field is part of the Cluster API contract and it is used to orchestrate provisioning. + // The value of this field is never updated after provisioning is completed. Please use conditions + // to check the operational state of the control plane. 
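For illustration, a minimal, self-contained Go sketch of how the new worker-pool topology surface might be populated; the class and pool names below are made up, and the `Topology`/`WorkersTopology` wiring is assumed from the surrounding package:

```go
package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

func main() {
	// A hypothetical pool entry for Cluster.spec.topology.workers.machinePools.
	pool := clusterv1.MachinePoolTopology{
		Class:            "default-worker", // must match a MachinePoolClass name in the ClusterClass
		Name:             "mp-0",           // unique within this Cluster's topology
		Replicas:         ptr.To[int32](3), // omit to let an external entity (e.g. autoscaler) manage it
		FailureDomains:   []string{"fd-1"}, // must match keys in the cluster's FailureDomains map
		NodeDrainTimeout: &metav1.Duration{Duration: 5 * time.Minute},
	}

	topology := clusterv1.Topology{
		Class:   "example-clusterclass",
		Version: "v1.30.0",
		Workers: &clusterv1.WorkersTopology{
			MachinePools: []clusterv1.MachinePoolTopology{pool},
		},
	}
	fmt.Printf("%+v\n", topology)
}
```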
// +optional ControlPlaneReady bool `json:"controlPlaneReady"` @@ -403,6 +484,7 @@ func (v APIEndpoint) String() string { // +kubebuilder:resource:path=clusters,shortName=cl,scope=Namespaced,categories=cluster-api // +kubebuilder:storageversion // +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="ClusterClass",type="string",JSONPath=".spec.topology.class",description="ClusterClass of this Cluster, empty if the Cluster is not using a ClusterClass" // +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Cluster status such as Pending/Provisioning/Provisioned/Deleting/Failed" // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since creation of Cluster" // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.topology.version",description="Kubernetes version associated with this Cluster" @@ -427,8 +509,9 @@ func (c *Cluster) SetConditions(conditions Conditions) { } // GetIPFamily returns a ClusterIPFamily from the configuration provided. -// Note: IPFamily is not a concept in Kubernetes. It was originally introduced in CAPI for CAPD. -// IPFamily may be dropped in a future release. More details at https://github.com/kubernetes-sigs/cluster-api/issues/7521 +// +// Deprecated: IPFamily is not a concept in Kubernetes. It was originally introduced in CAPI for CAPD. +// IPFamily will be dropped in a future release. More details at https://github.com/kubernetes-sigs/cluster-api/issues/7521 func (c *Cluster) GetIPFamily() (ClusterIPFamily, error) { var podCIDRs, serviceCIDRs []string if c.Spec.ClusterNetwork != nil { @@ -522,7 +605,7 @@ type ClusterList struct { } func init() { - SchemeBuilder.Register(&Cluster{}, &ClusterList{}) + objectTypes = append(objectTypes, &Cluster{}, &ClusterList{}) } // FailureDomains is a slice of FailureDomains. @@ -544,7 +627,7 @@ func (in FailureDomains) FilterControlPlane() FailureDomains { func (in FailureDomains) GetIDs() []*string { ids := make([]*string, 0, len(in)) for id := range in { - ids = append(ids, pointer.String(id)) + ids = append(ids, ptr.To(id)) } return ids } diff --git a/api/v1beta1/cluster_types_test.go b/api/v1beta1/cluster_types_test.go index e1893918f444..089ed5eebd89 100644 --- a/api/v1beta1/cluster_types_test.go +++ b/api/v1beta1/cluster_types_test.go @@ -95,7 +95,7 @@ func TestClusterIPFamily(t *testing.T) { g := NewWithT(t) ipFamily, err := tt.c.GetIPFamily() g.Expect(ipFamily).To(Equal(tt.expectRes)) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) }) } @@ -129,7 +129,7 @@ func TestClusterIPFamily(t *testing.T) { g := NewWithT(t) ipFamily, err := tt.c.GetIPFamily() g.Expect(ipFamily).To(Equal(tt.expectRes)) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) }) } diff --git a/api/v1beta1/clusterclass_types.go b/api/v1beta1/clusterclass_types.go index b26eac69539e..27d7d86e44a9 100644 --- a/api/v1beta1/clusterclass_types.go +++ b/api/v1beta1/clusterclass_types.go @@ -25,6 +25,9 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" ) +// ClusterClassKind represents the Kind of ClusterClass. 
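Throughout this diff, `k8s.io/utils/pointer` is replaced with the generic helpers in `k8s.io/utils/ptr`. A quick, runnable sketch of the equivalences (variable names are illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

func main() {
	// ptr.To replaces the type-specific constructors (pointer.String, pointer.Int32, ...).
	name := ptr.To("fd-1")       // *string
	replicas := ptr.To[int32](3) // *int32

	// ptr.Deref replaces pointer.StringDeref and friends: it returns the pointed-to
	// value, or the supplied default when the pointer is nil.
	fmt.Println(ptr.Deref(name, ""), ptr.Deref(replicas, 1))
}
```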
+const ClusterClassKind = "ClusterClass"
+
 // +kubebuilder:object:root=true
 // +kubebuilder:resource:path=clusterclasses,shortName=cc,scope=Namespaced,categories=cluster-api
 // +kubebuilder:storageversion
@@ -103,6 +106,10 @@ type ControlPlaneClass struct {
 	// +optional
 	MachineHealthCheck *MachineHealthCheckClass `json:"machineHealthCheck,omitempty"`
 
+	// NamingStrategy allows changing the naming pattern used when creating the control plane provider object.
+	// +optional
+	NamingStrategy *ControlPlaneClassNamingStrategy `json:"namingStrategy,omitempty"`
+
 	// NodeDrainTimeout is the total amount of time that the controller will spend on draining a node.
 	// The default value is 0, meaning that the node can be drained without any time limitations.
 	// NOTE: NodeDrainTimeout is different from `kubectl drain --timeout`
@@ -124,12 +131,30 @@ type ControlPlaneClass struct {
 	NodeDeletionTimeout *metav1.Duration `json:"nodeDeletionTimeout,omitempty"`
 }
 
+// ControlPlaneClassNamingStrategy defines the naming strategy for control plane objects.
+type ControlPlaneClassNamingStrategy struct {
+	// Template defines the template to use for generating the name of the ControlPlane object.
+	// If not defined, it will fall back to `{{ .cluster.name }}-{{ .random }}`.
+	// If the templated string exceeds 63 characters, it will be trimmed to 58 characters and will
+	// get concatenated with a random suffix of length 5.
+	// The templating mechanism provides the following arguments:
+	// * `.cluster.name`: The name of the cluster object.
+	// * `.random`: A random alphanumeric string, without vowels, of length 5.
+	// +optional
+	Template *string `json:"template,omitempty"`
+}
+
 // WorkersClass is a collection of deployment classes.
 type WorkersClass struct {
 	// MachineDeployments is a list of machine deployment classes that can be used to create
 	// a set of worker nodes.
 	// +optional
 	MachineDeployments []MachineDeploymentClass `json:"machineDeployments,omitempty"`
+
+	// MachinePools is a list of machine pool classes that can be used to create
+	// a set of worker nodes.
+	// +optional
+	MachinePools []MachinePoolClass `json:"machinePools,omitempty"`
 }
 
 // MachineDeploymentClass serves as a template to define a set of worker nodes of the cluster
@@ -154,6 +179,10 @@ type MachineDeploymentClass struct {
 	// +optional
 	FailureDomain *string `json:"failureDomain,omitempty"`
 
+	// NamingStrategy allows changing the naming pattern used when creating the MachineDeployment.
+	// +optional
+	NamingStrategy *MachineDeploymentClassNamingStrategy `json:"namingStrategy,omitempty"`
+
 	// NodeDrainTimeout is the total amount of time that the controller will spend on draining a node.
 	// The default value is 0, meaning that the node can be drained without any time limitations.
 	// NOTE: NodeDrainTimeout is different from `kubectl drain --timeout`
@@ -204,11 +233,27 @@ type MachineDeploymentClassTemplate struct {
 	Infrastructure LocalObjectTemplate `json:"infrastructure"`
 }
 
+// MachineDeploymentClassNamingStrategy defines the naming strategy for machine deployment objects.
+type MachineDeploymentClassNamingStrategy struct {
+	// Template defines the template to use for generating the name of the MachineDeployment object.
+	// If not defined, it will fall back to `{{ .cluster.name }}-{{ .machineDeployment.topologyName }}-{{ .random }}`.
+	// If the templated string exceeds 63 characters, it will be trimmed to 58 characters and will
+	// get concatenated with a random suffix of length 5.
+	// The templating mechanism provides the following arguments:
+	// * `.cluster.name`: The name of the cluster object.
+	// * `.random`: A random alphanumeric string, without vowels, of length 5.
+	// * `.machineDeployment.topologyName`: The name of the MachineDeployment topology (Cluster.spec.topology.workers.machineDeployments[].name).
+	// +optional
+	Template *string `json:"template,omitempty"`
+}
+
 // MachineHealthCheckClass defines a MachineHealthCheck for a group of Machines.
 type MachineHealthCheckClass struct {
 	// UnhealthyConditions contains a list of the conditions that determine
 	// whether a node is considered unhealthy. The conditions are combined in a
 	// logical OR, i.e. if any of the conditions is met, the node is unhealthy.
+	//
+	// +optional
 	UnhealthyConditions []UnhealthyCondition `json:"unhealthyConditions,omitempty"`
 
 	// Any further remediation is only allowed if at most "MaxUnhealthy" machines selected by
@@ -241,6 +286,87 @@ type MachineHealthCheckClass struct {
 	RemediationTemplate *corev1.ObjectReference `json:"remediationTemplate,omitempty"`
 }
 
+// MachinePoolClass serves as a template to define a pool of worker nodes of the cluster
+// provisioned using `ClusterClass`.
+type MachinePoolClass struct {
+	// Class denotes a type of machine pool present in the cluster,
+	// this name MUST be unique within a ClusterClass and can be referenced
+	// in the Cluster to create a managed MachinePool.
+	Class string `json:"class"`
+
+	// Template is a local struct containing a collection of templates for creation of
+	// MachinePool objects representing a pool of worker nodes.
+	Template MachinePoolClassTemplate `json:"template"`
+
+	// FailureDomains is the list of failure domains the MachinePool should be attached to.
+	// Must match a key in the FailureDomains map stored on the cluster object.
+	// NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass.
+	// +optional
+	FailureDomains []string `json:"failureDomains,omitempty"`
+
+	// NamingStrategy allows changing the naming pattern used when creating the MachinePool.
+	// +optional
+	NamingStrategy *MachinePoolClassNamingStrategy `json:"namingStrategy,omitempty"`
+
+	// NodeDrainTimeout is the total amount of time that the controller will spend on draining a node.
+	// The default value is 0, meaning that the node can be drained without any time limitations.
+	// NOTE: NodeDrainTimeout is different from `kubectl drain --timeout`
+	// NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass.
+	// +optional
+	NodeDrainTimeout *metav1.Duration `json:"nodeDrainTimeout,omitempty"`
+
+	// NodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes
+	// to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations.
+	// NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass.
+	// +optional
+	NodeVolumeDetachTimeout *metav1.Duration `json:"nodeVolumeDetachTimeout,omitempty"`
+
+	// NodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine
+	// hosts after the MachinePool is marked for deletion. A duration of 0 will retry deletion indefinitely.
+	// Defaults to 10 seconds.
+	// NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass.
+	// +optional
+	NodeDeletionTimeout *metav1.Duration `json:"nodeDeletionTimeout,omitempty"`
+
+	// Minimum number of seconds for which a newly created machine pool should
+	// be ready.
+	// Defaults to 0 (machine will be considered available as soon as it
+	// is ready)
+	// NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass.
+	MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
+}
+
+// MachinePoolClassTemplate defines what a MachinePool generated from a MachinePoolClass
+// should look like.
+type MachinePoolClassTemplate struct {
+	// Metadata is the metadata applied to the MachinePool.
+	// At runtime this metadata is merged with the corresponding metadata from the topology.
+	// +optional
+	Metadata ObjectMeta `json:"metadata,omitempty"`
+
+	// Bootstrap contains the bootstrap template reference to be used
+	// for the creation of the Machines in the MachinePool.
+	Bootstrap LocalObjectTemplate `json:"bootstrap"`
+
+	// Infrastructure contains the infrastructure template reference to be used
+	// for the creation of the MachinePool.
+	Infrastructure LocalObjectTemplate `json:"infrastructure"`
+}
+
+// MachinePoolClassNamingStrategy defines the naming strategy for machine pool objects.
+type MachinePoolClassNamingStrategy struct {
+	// Template defines the template to use for generating the name of the MachinePool object.
+	// If not defined, it will fall back to `{{ .cluster.name }}-{{ .machinePool.topologyName }}-{{ .random }}`.
+	// If the templated string exceeds 63 characters, it will be trimmed to 58 characters and will
+	// get concatenated with a random suffix of length 5.
+	// The templating mechanism provides the following arguments:
+	// * `.cluster.name`: The name of the cluster object.
+	// * `.random`: A random alphanumeric string, without vowels, of length 5.
+	// * `.machinePool.topologyName`: The name of the MachinePool topology (Cluster.spec.topology.workers.machinePools[].name).
+	// +optional
+	Template *string `json:"template,omitempty"`
+}
+
 // IsZero returns true if none of the values of MachineHealthCheckClass are defined.
 func (m MachineHealthCheckClass) IsZero() bool {
 	return reflect.ValueOf(m).IsZero()
@@ -258,10 +384,32 @@ type ClusterClassVariable struct {
 	// required, this will be specified inside the schema.
 	Required bool `json:"required"`
 
+	// Metadata is the metadata of a variable.
+	// It can be used to add additional data for higher level tools to
+	// a ClusterClassVariable.
+	// +optional
+	Metadata ClusterClassVariableMetadata `json:"metadata,omitempty"`
+
 	// Schema defines the schema of the variable.
 	Schema VariableSchema `json:"schema"`
 }
 
+// ClusterClassVariableMetadata is the metadata of a variable.
+// It can be used to add additional data for higher level tools to
+// a ClusterClassVariable.
+type ClusterClassVariableMetadata struct {
+	// Map of string keys and values that can be used to organize and categorize
+	// (scope and select) variables.
+	// +optional
+	Labels map[string]string `json:"labels,omitempty"`
+
+	// Annotations is an unstructured key value map that can be used to store and
+	// retrieve arbitrary metadata.
+	// They are not queryable.
+	// +optional
+	Annotations map[string]string `json:"annotations,omitempty"`
+}
+
 // VariableSchema defines the schema of a variable.
 type VariableSchema struct {
 	// OpenAPIV3Schema defines the schema of a variable via OpenAPI v3
@@ -469,6 +617,11 @@ type PatchSelectorMatch struct {
 	// .spec.workers.machineDeployments.
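Putting the new ClusterClass surface together, a hedged sketch of a `WorkersClass` carrying a `MachinePoolClass` with a custom naming strategy; the template kinds and names are hypothetical, not taken from this diff:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/utils/ptr"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

func main() {
	workers := clusterv1.WorkersClass{
		MachinePools: []clusterv1.MachinePoolClass{{
			// Referenced from Cluster.spec.topology.workers.machinePools[].class.
			Class: "default-worker",
			Template: clusterv1.MachinePoolClassTemplate{
				Bootstrap: clusterv1.LocalObjectTemplate{Ref: &corev1.ObjectReference{
					APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
					Kind:       "KubeadmConfigTemplate",
					Name:       "example-pool-bootstrap", // illustrative
				}},
				Infrastructure: clusterv1.LocalObjectTemplate{Ref: &corev1.ObjectReference{
					APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
					Kind:       "DockerMachinePoolTemplate", // illustrative
					Name:       "example-pool-infra",
				}},
			},
			// Optional: control the generated MachinePool name.
			NamingStrategy: &clusterv1.MachinePoolClassNamingStrategy{
				Template: ptr.To("{{ .cluster.name }}-pool-{{ .random }}"),
			},
		}},
	}
	fmt.Printf("%+v\n", workers)
}
```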
// +optional MachineDeploymentClass *PatchSelectorMatchMachineDeploymentClass `json:"machineDeploymentClass,omitempty"` + + // MachinePoolClass selects templates referenced in specific MachinePoolClasses in + // .spec.workers.machinePools. + // +optional + MachinePoolClass *PatchSelectorMatchMachinePoolClass `json:"machinePoolClass,omitempty"` } // PatchSelectorMatchMachineDeploymentClass selects templates referenced @@ -479,6 +632,14 @@ type PatchSelectorMatchMachineDeploymentClass struct { Names []string `json:"names,omitempty"` } +// PatchSelectorMatchMachinePoolClass selects templates referenced +// in specific MachinePoolClasses in .spec.workers.machinePools. +type PatchSelectorMatchMachinePoolClass struct { + // Names selects templates by class names. + // +optional + Names []string `json:"names,omitempty"` +} + // JSONPatch defines a JSON patch. type JSONPatch struct { // Op defines the operation of the patch. @@ -596,6 +757,12 @@ type ClusterClassStatusVariableDefinition struct { // required, this will be specified inside the schema. Required bool `json:"required"` + // Metadata is the metadata of a variable. + // It can be used to add additional data for higher level tools to + // a ClusterClassVariable. + // +optional + Metadata ClusterClassVariableMetadata `json:"metadata,omitempty"` + // Schema defines the schema of the variable. Schema VariableSchema `json:"schema"` } @@ -622,5 +789,5 @@ type ClusterClassList struct { } func init() { - SchemeBuilder.Register(&ClusterClass{}, &ClusterClassList{}) + objectTypes = append(objectTypes, &ClusterClass{}, &ClusterClassList{}) } diff --git a/api/v1beta1/common_types.go b/api/v1beta1/common_types.go index 9c201109e613..28fd0329644f 100644 --- a/api/v1beta1/common_types.go +++ b/api/v1beta1/common_types.go @@ -18,7 +18,10 @@ package v1beta1 import ( corev1 "k8s.io/api/core/v1" + apivalidation "k8s.io/apimachinery/pkg/api/validation" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/util/validation/field" ) const ( @@ -53,10 +56,22 @@ const ( // will not be completed until the annotation is removed and all MachineDeployments are upgraded. ClusterTopologyDeferUpgradeAnnotation = "topology.cluster.x-k8s.io/defer-upgrade" + // ClusterTopologyUpgradeConcurrencyAnnotation can be set as top-level annotation on the Cluster object of + // a classy Cluster to define the maximum concurrency while upgrading MachineDeployments. + ClusterTopologyUpgradeConcurrencyAnnotation = "topology.cluster.x-k8s.io/upgrade-concurrency" + + // ClusterTopologyMachinePoolNameLabel is the label set on the generated MachinePool objects + // to track the name of the MachinePool topology it represents. + ClusterTopologyMachinePoolNameLabel = "topology.cluster.x-k8s.io/pool-name" + // ClusterTopologyUnsafeUpdateClassNameAnnotation can be used to disable the webhook check on // update that disallows a pre-existing Cluster to be populated with Topology information and Class. ClusterTopologyUnsafeUpdateClassNameAnnotation = "unsafe.topology.cluster.x-k8s.io/disable-update-class-name-check" + // ClusterTopologyUnsafeUpdateVersionAnnotation can be used to disable the webhook checks on + // update that disallows updating the .topology.spec.version on certain conditions. + ClusterTopologyUnsafeUpdateVersionAnnotation = "unsafe.topology.cluster.x-k8s.io/disable-update-version-check" + // ProviderNameLabel is the label set on components in the provider manifest. 
// This label allows to easily identify all the components belonging to a provider; the clusterctl
 // tool uses this label for implementing provider's lifecycle operations.
@@ -74,6 +89,9 @@ const (
 	// OwnerKindAnnotation is the annotation set on nodes identifying the owner kind.
 	OwnerKindAnnotation = "cluster.x-k8s.io/owner-kind"
 
+	// LabelsFromMachineAnnotation is the annotation set on nodes to track the labels originated from machines.
+	LabelsFromMachineAnnotation = "cluster.x-k8s.io/labels-from-machine"
+
 	// OwnerNameAnnotation is the annotation set on nodes identifying the owner name.
 	OwnerNameAnnotation = "cluster.x-k8s.io/owner-name"
 
@@ -110,6 +128,21 @@ const (
 	// MachineSkipRemediationAnnotation is the annotation used to mark the machines that should not be considered for remediation by MachineHealthCheck reconciler.
 	MachineSkipRemediationAnnotation = "cluster.x-k8s.io/skip-remediation"
 
+	// RemediateMachineAnnotation is the annotation used to mark machines that should be remediated by MachineHealthCheck reconciler.
+	RemediateMachineAnnotation = "cluster.x-k8s.io/remediate-machine"
+
+	// MachineSetSkipPreflightChecksAnnotation is the annotation used to provide a comma-separated list of
+	// preflight checks that should be skipped during the MachineSet reconciliation.
+	// Supported items are:
+	// - KubeadmVersionSkew (skips the kubeadm version skew preflight check)
+	// - KubernetesVersionSkew (skips the kubernetes version skew preflight check)
+	// - ControlPlaneIsStable (skips checking that the control plane is neither provisioning nor upgrading)
+	// - All (skips all preflight checks)
+	// Example: "machineset.cluster.x-k8s.io/skip-preflight-checks": "ControlPlaneIsStable,KubernetesVersionSkew".
+	// Note: The annotation can also be set on a MachineDeployment as MachineDeployment annotations are synced to
+	// the MachineSet.
+	MachineSetSkipPreflightChecksAnnotation = "machineset.cluster.x-k8s.io/skip-preflight-checks"
+
 	// ClusterSecretType defines the type of secret created by core components.
 	// Note: This is used by core CAPI, CAPBK, and KCP to determine whether a secret is created by the controllers
 	// themselves or supplied by the user (e.g. bring your own certificates).
@@ -141,11 +174,81 @@ const (
 	// This annotation can be used to inform MachinePool status during in-progress scaling scenarios.
 	ReplicasManagedByAnnotation = "cluster.x-k8s.io/replicas-managed-by"
 
+	// AutoscalerMinSizeAnnotation defines the minimum node group size.
+	// The annotation is used by the autoscaler.
+	// The annotation is copied from kubernetes/autoscaler.
+	// Ref:https://github.com/kubernetes/autoscaler/blob/d8336cca37dbfa5d1cb7b7e453bd511172d6e5e7/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils.go#L256-L259
+	// Note: With the Kubernetes autoscaler it is possible to use different annotations by configuring a different
+	// "Cluster API group" than "cluster.x-k8s.io" via the "CAPI_GROUP" environment variable.
+	// We only handle the default group in our implementation.
+	// Note: It can be used by setting it as a top-level annotation on MachineDeployments and MachineSets.
+	AutoscalerMinSizeAnnotation = "cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size"
+
+	// AutoscalerMaxSizeAnnotation defines the maximum node group size.
+	// The annotation is used by the autoscaler.
+	// The annotation definition is copied from kubernetes/autoscaler.
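A short sketch of opting a MachineSet out of individual preflight checks with the new annotation; the object name is illustrative, and the values follow the `MachineSetPreflightCheck` constants defined just below:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

func main() {
	ms := &clusterv1.MachineSet{
		ObjectMeta: metav1.ObjectMeta{
			Name: "md-0-abc123", // illustrative
			Annotations: map[string]string{
				// Skip the listed checks; "All" would skip every preflight check.
				clusterv1.MachineSetSkipPreflightChecksAnnotation: "ControlPlaneIsStable,KubernetesVersionSkew",
			},
		},
	}
	fmt.Println(ms.Annotations)
}
```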
+	// Ref:https://github.com/kubernetes/autoscaler/blob/d8336cca37dbfa5d1cb7b7e453bd511172d6e5e7/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils.go#L264-L267
+	// Note: With the Kubernetes autoscaler it is possible to use different annotations by configuring a different
+	// "Cluster API group" than "cluster.x-k8s.io" via the "CAPI_GROUP" environment variable.
+	// We only handle the default group in our implementation.
+	// Note: It can be used by setting it as a top-level annotation on MachineDeployments and MachineSets.
+	AutoscalerMaxSizeAnnotation = "cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size"
+
 	// VariableDefinitionFromInline indicates a patch or variable was defined in the `.spec` of a ClusterClass
 	// rather than from an external patch extension.
 	VariableDefinitionFromInline = "inline"
 )
 
+// MachineSetPreflightCheck defines a valid MachineSet preflight check.
+type MachineSetPreflightCheck string
+
+const (
+	// MachineSetPreflightCheckAll can be used to represent all the MachineSet preflight checks.
+	MachineSetPreflightCheckAll MachineSetPreflightCheck = "All"
+
+	// MachineSetPreflightCheckKubeadmVersionSkew is the name of the preflight check
+	// that verifies if the machine being created or remediated for the MachineSet conforms to the kubeadm version
+	// skew policy that requires the machine to be at the same version as the control plane.
+	// Note: This is a stopgap while the root cause of the problem is fixed in kubeadm; this check will become
+	// a no-op once this check is available in kubeadm, and then eventually be dropped when all the
+	// supported Kubernetes/kubeadm versions have implemented the fix.
+	// The preflight check is only run if a ControlPlane is used (controlPlaneRef must exist in the Cluster),
+	// the ControlPlane has a version, the MachineSet has a version and the MachineSet uses the Kubeadm bootstrap
+	// provider.
+	MachineSetPreflightCheckKubeadmVersionSkew MachineSetPreflightCheck = "KubeadmVersionSkew"
+
+	// MachineSetPreflightCheckKubernetesVersionSkew is the name of the preflight check that verifies
+	// if the machines being created or remediated for the MachineSet conform to the Kubernetes version skew policy
+	// that requires the machines to be at a version that is not more than 2 minor versions lower than the ControlPlane version.
+	// The preflight check is only run if a ControlPlane is used (controlPlaneRef must exist in the Cluster),
+	// the ControlPlane has a version and the MachineSet has a version.
+	MachineSetPreflightCheckKubernetesVersionSkew MachineSetPreflightCheck = "KubernetesVersionSkew"
+
+	// MachineSetPreflightCheckControlPlaneIsStable is the name of the preflight check
+	// that verifies if the control plane is not provisioning and not upgrading.
+	// The preflight check is only run if a ControlPlane is used (controlPlaneRef must exist in the Cluster)
+	// and the ControlPlane has a version.
+	MachineSetPreflightCheckControlPlaneIsStable MachineSetPreflightCheck = "ControlPlaneIsStable"
+)
+
+// NodeOutdatedRevisionTaint can be added to Nodes during rolling updates, generally triggered by updating a MachineDeployment.
+// This taint is used to prevent unnecessary pod churn, i.e., as the first node is drained, pods previously running on
+// that node are scheduled onto nodes that have yet to be replaced, but will be torn down soon.
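A sketch of wiring a MachineDeployment up for the cluster autoscaler with the new size annotations; names are illustrative, and `spec.replicas` is left unset so the autoscaler owns the count:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

func main() {
	md := &clusterv1.MachineDeployment{
		ObjectMeta: metav1.ObjectMeta{
			Name: "md-0", // illustrative
			Annotations: map[string]string{
				// The autoscaler scales this node group within these bounds.
				clusterv1.AutoscalerMinSizeAnnotation: "1",
				clusterv1.AutoscalerMaxSizeAnnotation: "10",
			},
		},
		// Spec.Replicas is deliberately left nil so the autoscaler manages it.
	}
	fmt.Println(md.Annotations)
}
```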
+var NodeOutdatedRevisionTaint = corev1.Taint{
+	Key:    "node.cluster.x-k8s.io/outdated-revision",
+	Effect: corev1.TaintEffectPreferNoSchedule,
+}
+
+// NodeUninitializedTaint can be added to Nodes at creation by the bootstrap provider, e.g. the
+// KubeadmBootstrap provider will add the taint.
+// This taint is used to prevent workloads from being scheduled on Nodes before the node is initialized by Cluster API.
+// As of today the Node initialization consists of syncing labels from Machines to Nodes. Once the labels
+// have been initially synced the taint is removed from the Node.
+var NodeUninitializedTaint = corev1.Taint{
+	Key:    "node.cluster.x-k8s.io/uninitialized",
+	Effect: corev1.TaintEffectNoSchedule,
+}
+
 const (
 	// TemplateSuffix is the object kind suffix used by template types.
 	TemplateSuffix = "Template"
@@ -216,3 +319,16 @@ type ObjectMeta struct {
 	// +optional
 	Annotations map[string]string `json:"annotations,omitempty"`
 }
+
+// Validate validates the labels and annotations in ObjectMeta.
+func (metadata *ObjectMeta) Validate(parent *field.Path) field.ErrorList {
+	allErrs := metav1validation.ValidateLabels(
+		metadata.Labels,
+		parent.Child("labels"),
+	)
+	allErrs = append(allErrs, apivalidation.ValidateAnnotations(
+		metadata.Annotations,
+		parent.Child("annotations"),
+	)...)
+	return allErrs
+}
diff --git a/api/v1beta1/condition_consts.go b/api/v1beta1/condition_consts.go
index c0c5de162efa..57d8324c1864 100644
--- a/api/v1beta1/condition_consts.go
+++ b/api/v1beta1/condition_consts.go
@@ -152,6 +152,10 @@ const (
 	// MachineHasFailureReason is the reason used when a machine has either a FailureReason or a FailureMessage set on its status.
 	MachineHasFailureReason = "MachineHasFailure"
 
+	// HasRemediateMachineAnnotationReason is the reason that gets set at the MachineHealthCheckSucceededCondition when a machine
+	// has the RemediateMachineAnnotation set.
+	HasRemediateMachineAnnotationReason = "HasRemediateMachineAnnotation"
+
 	// NodeStartupTimeoutReason is the reason used when a machine's node does not appear within the specified timeout.
 	NodeStartupTimeoutReason = "NodeStartupTimeout"
 
@@ -207,6 +211,11 @@ const (
 	// NodeConditionsFailedReason (Severity=Warning) documents a node is not in a healthy state due to the failed state of at least 1 Kubelet condition.
 	NodeConditionsFailedReason = "NodeConditionsFailed"
+
+	// NodeInspectionFailedReason documents a failure in inspecting the node.
+	// This reason is used when the Machine controller is unable to list Nodes to find
+	// the corresponding Node for a Machine by ProviderID.
+	NodeInspectionFailedReason = "NodeInspectionFailed"
 )
 
 // Conditions and condition Reasons for the MachineHealthCheck object.
@@ -228,6 +237,14 @@ const (
 	// machines required (i.e. Spec.Replicas-MaxUnavailable when MachineDeploymentStrategyType = RollingUpdate) are up and running for at least minReadySeconds.
 	MachineDeploymentAvailableCondition ConditionType = "Available"
 
+	// MachineSetReadyCondition reports a summary of current status of the MachineSet owned by the MachineDeployment.
+	MachineSetReadyCondition ConditionType = "MachineSetReady"
+
+	// WaitingForMachineSetFallbackReason (Severity=Info) documents a MachineDeployment waiting for the underlying MachineSet
+	// to be available.
+	// NOTE: This reason is used only as a fallback when the MachineSet object is not reporting its own ready condition.
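A minimal sketch of how a user-facing tool might apply the new `RemediateMachineAnnotation`; it assumes a configured controller-runtime client, and the function name and key are illustrative:

```go
package example

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// markForRemediation annotates a Machine so the MachineHealthCheck controller
// remediates it; the controller reports this via HasRemediateMachineAnnotationReason
// on the MachineHealthCheckSucceededCondition.
func markForRemediation(ctx context.Context, c client.Client, key client.ObjectKey) error {
	machine := &clusterv1.Machine{}
	if err := c.Get(ctx, key, machine); err != nil {
		return err
	}
	if machine.Annotations == nil {
		machine.Annotations = map[string]string{}
	}
	machine.Annotations[clusterv1.RemediateMachineAnnotation] = ""
	return c.Update(ctx, machine)
}
```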
+ WaitingForMachineSetFallbackReason = "WaitingForMachineSet" + // WaitingForAvailableMachinesReason (Severity=Warning) reflects the fact that the required minimum number of machines for a machinedeployment are not available. WaitingForAvailableMachinesReason = "WaitingForAvailableMachines" ) @@ -243,6 +260,10 @@ const ( // MachinesReadyCondition reports an aggregate of current status of the machines controlled by the MachineSet. MachinesReadyCondition ConditionType = "MachinesReady" + // PreflightCheckFailedReason (Severity=Error) documents a MachineSet failing preflight checks + // to create machine(s). + PreflightCheckFailedReason = "PreflightCheckFailed" + // BootstrapTemplateCloningFailedReason (Severity=Error) documents a MachineSet failing to // clone the bootstrap template. BootstrapTemplateCloningFailedReason = "BootstrapTemplateCloningFailed" @@ -282,6 +303,11 @@ const ( // not yet completed because Control Plane is not yet updated to match the desired topology spec. TopologyReconciledControlPlaneUpgradePendingReason = "ControlPlaneUpgradePending" + // TopologyReconciledMachineDeploymentsCreatePendingReason (Severity=Info) documents reconciliation of a Cluster topology + // not yet completed because at least one of the MachineDeployments is yet to be created. + // This generally happens because new MachineDeployment creations are held off while the ControlPlane is not stable. + TopologyReconciledMachineDeploymentsCreatePendingReason = "MachineDeploymentsCreatePending" + // TopologyReconciledMachineDeploymentsUpgradePendingReason (Severity=Info) documents reconciliation of a Cluster topology // not yet completed because at least one of the MachineDeployments is not yet updated to match the desired topology spec. TopologyReconciledMachineDeploymentsUpgradePendingReason = "MachineDeploymentsUpgradePending" @@ -290,6 +316,19 @@ const ( // not yet completed because the upgrade for at least one of the MachineDeployments has been deferred. TopologyReconciledMachineDeploymentsUpgradeDeferredReason = "MachineDeploymentsUpgradeDeferred" + // TopologyReconciledMachinePoolsUpgradePendingReason (Severity=Info) documents reconciliation of a Cluster topology + // not yet completed because at least one of the MachinePools is not yet updated to match the desired topology spec. + TopologyReconciledMachinePoolsUpgradePendingReason = "MachinePoolsUpgradePending" + + // TopologyReconciledMachinePoolsCreatePendingReason (Severity=Info) documents reconciliation of a Cluster topology + // not yet completed because at least one of the MachinePools is yet to be created. + // This generally happens because new MachinePool creations are held off while the ControlPlane is not stable. + TopologyReconciledMachinePoolsCreatePendingReason = "MachinePoolsCreatePending" + + // TopologyReconciledMachinePoolsUpgradeDeferredReason (Severity=Info) documents reconciliation of a Cluster topology + // not yet completed because the upgrade for at least one of the MachinePools has been deferred. + TopologyReconciledMachinePoolsUpgradeDeferredReason = "MachinePoolsUpgradeDeferred" + // TopologyReconciledHookBlockingReason (Severity=Info) documents reconciliation of a Cluster topology // not yet completed because at least one of the lifecycle hooks is blocking. 
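A hedged sketch of reading the new topology reasons back, assuming the existing `util/conditions` helpers and a fetched Cluster object:

```go
package example

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// machinePoolsHoldTopology reports whether topology reconciliation is currently
// held back by MachinePool creation, upgrade, or a deferred upgrade.
func machinePoolsHoldTopology(cluster *clusterv1.Cluster) bool {
	reason := conditions.GetReason(cluster, clusterv1.TopologyReconciledCondition)
	return reason == clusterv1.TopologyReconciledMachinePoolsCreatePendingReason ||
		reason == clusterv1.TopologyReconciledMachinePoolsUpgradePendingReason ||
		reason == clusterv1.TopologyReconciledMachinePoolsUpgradeDeferredReason
}
```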
TopologyReconciledHookBlockingReason = "LifecycleHookBlocking" diff --git a/api/v1beta1/groupversion_info.go b/api/v1beta1/groupversion_info.go index 2d118b8de8f7..95968743c68f 100644 --- a/api/v1beta1/groupversion_info.go +++ b/api/v1beta1/groupversion_info.go @@ -20,17 +20,26 @@ limitations under the License. package v1beta1 import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" ) var ( // GroupVersion is group version used to register these objects. GroupVersion = schema.GroupVersion{Group: "cluster.x-k8s.io", Version: "v1beta1"} - // SchemeBuilder is used to add go types to the GroupVersionKind scheme. - SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + // schemeBuilder is used to add go types to the GroupVersionKind scheme. + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) // AddToScheme adds the types in this group-version to the given scheme. - AddToScheme = SchemeBuilder.AddToScheme + AddToScheme = schemeBuilder.AddToScheme + + objectTypes = []runtime.Object{} ) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, objectTypes...) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/api/v1beta1/index/.import-restrictions b/api/v1beta1/index/.import-restrictions new file mode 100644 index 000000000000..b3ee9a15dbda --- /dev/null +++ b/api/v1beta1/index/.import-restrictions @@ -0,0 +1,5 @@ +rules: + - selectorRegexp: sigs[.]k8s[.]io/controller-runtime + allowedPrefixes: + - "sigs.k8s.io/controller-runtime" + forbiddenPrefixes: [] diff --git a/api/v1beta1/index/index.go b/api/v1beta1/index/index.go index 69f3278324e9..c6a17bf175bc 100644 --- a/api/v1beta1/index/index.go +++ b/api/v1beta1/index/index.go @@ -41,5 +41,15 @@ func AddDefaultIndexes(ctx context.Context, mgr ctrl.Manager) error { } } + if feature.Gates.Enabled(feature.MachinePool) { + if err := ByMachinePoolNode(ctx, mgr); err != nil { + return err + } + + if err := ByMachinePoolProviderID(ctx, mgr); err != nil { + return err + } + } + return nil } diff --git a/api/v1beta1/index/machine.go b/api/v1beta1/index/machine.go index efffee9e27c3..28d6fce5352f 100644 --- a/api/v1beta1/index/machine.go +++ b/api/v1beta1/index/machine.go @@ -21,12 +21,11 @@ import ( "fmt" "github.com/pkg/errors" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/controllers/noderefutil" ) const ( @@ -82,14 +81,11 @@ func machineByProviderID(o client.Object) []string { panic(fmt.Sprintf("Expected a Machine but got a %T", o)) } - if pointer.StringDeref(machine.Spec.ProviderID, "") == "" { - return nil - } + providerID := ptr.Deref(machine.Spec.ProviderID, "") - providerID, err := noderefutil.NewProviderID(*machine.Spec.ProviderID) - if err != nil { - // Failed to create providerID, skipping. + if providerID == "" { return nil } - return []string{providerID.IndexKey()} + + return []string{providerID} } diff --git a/api/v1beta1/index/machine_test.go b/api/v1beta1/index/machine_test.go index 33ed9b1dda77..2aa0061503f2 100644 --- a/api/v1beta1/index/machine_test.go +++ b/api/v1beta1/index/machine_test.go @@ -21,11 +21,10 @@ import ( . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/controllers/noderefutil" ) func TestIndexMachineByNodeName(t *testing.T) { @@ -62,9 +61,7 @@ func TestIndexMachineByNodeName(t *testing.T) { } func TestIndexMachineByProviderID(t *testing.T) { - validProviderID, err := noderefutil.NewProviderID("aws://region/zone/id") - g := NewWithT(t) - g.Expect(err).ToNot(HaveOccurred()) + validProviderID := "aws://region/zone/id" testCases := []struct { name string @@ -80,7 +77,7 @@ func TestIndexMachineByProviderID(t *testing.T) { name: "Machine has invalid providerID", object: &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ - ProviderID: pointer.String("invalid"), + ProviderID: ptr.To(""), }, }, expected: nil, @@ -89,10 +86,10 @@ func TestIndexMachineByProviderID(t *testing.T) { name: "Machine has valid providerID", object: &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ - ProviderID: pointer.String(validProviderID.String()), + ProviderID: ptr.To(validProviderID), }, }, - expected: []string{validProviderID.IndexKey()}, + expected: []string{validProviderID}, }, } diff --git a/api/v1beta1/index/machinepool.go b/api/v1beta1/index/machinepool.go new file mode 100644 index 000000000000..b877d5cbdaca --- /dev/null +++ b/api/v1beta1/index/machinepool.go @@ -0,0 +1,103 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package index + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" +) + +const ( + // MachinePoolNodeNameField is used by the MachinePool Controller to index MachinePools by Node name, and add a watch on Nodes. + MachinePoolNodeNameField = "status.nodeRefs.name" + + // MachinePoolProviderIDField is used to index MachinePools by ProviderID. It's useful to find MachinePools + // in a management cluster from Nodes in a workload cluster. + MachinePoolProviderIDField = "spec.providerIDList" +) + +// ByMachinePoolNode adds the machinepool node name index to the +// managers cache. +func ByMachinePoolNode(ctx context.Context, mgr ctrl.Manager) error { + if err := mgr.GetCache().IndexField(ctx, &expv1.MachinePool{}, + MachinePoolNodeNameField, + MachinePoolByNodeName, + ); err != nil { + return errors.Wrap(err, "error setting index field") + } + + return nil +} + +// MachinePoolByNodeName contains the logic to index MachinePools by Node name. 
+func MachinePoolByNodeName(o client.Object) []string { + machinepool, ok := o.(*expv1.MachinePool) + if !ok { + panic(fmt.Sprintf("Expected a MachinePool but got a %T", o)) + } + + if len(machinepool.Status.NodeRefs) == 0 { + return nil + } + + nodeNames := make([]string, 0, len(machinepool.Status.NodeRefs)) + for _, ref := range machinepool.Status.NodeRefs { + nodeNames = append(nodeNames, ref.Name) + } + return nodeNames +} + +// ByMachinePoolProviderID adds the machinepool providerID index to the +// managers cache. +func ByMachinePoolProviderID(ctx context.Context, mgr ctrl.Manager) error { + if err := mgr.GetCache().IndexField(ctx, &expv1.MachinePool{}, + MachinePoolProviderIDField, + machinePoolByProviderID, + ); err != nil { + return errors.Wrap(err, "error setting index field") + } + + return nil +} + +func machinePoolByProviderID(o client.Object) []string { + machinepool, ok := o.(*expv1.MachinePool) + if !ok { + panic(fmt.Sprintf("Expected a MachinePool but got a %T", o)) + } + + if len(machinepool.Spec.ProviderIDList) == 0 { + return nil + } + + providerIDs := make([]string, 0, len(machinepool.Spec.ProviderIDList)) + for _, id := range machinepool.Spec.ProviderIDList { + if id == "" { + // Valid providerID not found, skipping. + continue + } + providerIDs = append(providerIDs, id) + } + + return providerIDs +} diff --git a/api/v1beta1/index/machinepool_test.go b/api/v1beta1/index/machinepool_test.go new file mode 100644 index 000000000000..09ab8769acc8 --- /dev/null +++ b/api/v1beta1/index/machinepool_test.go @@ -0,0 +1,108 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package index + +import ( + "testing" + + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" +) + +func TestIndexMachinePoolByNodeName(t *testing.T) { + testCases := []struct { + name string + object client.Object + expected []string + }{ + { + name: "when the machinepool has no NodeRef", + object: &expv1.MachinePool{}, + expected: []string{}, + }, + { + name: "when the machinepool has valid NodeRefs", + object: &expv1.MachinePool{ + Status: expv1.MachinePoolStatus{ + NodeRefs: []corev1.ObjectReference{ + { + Name: "node1", + }, + { + Name: "node2", + }, + }, + }, + }, + expected: []string{"node1", "node2"}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + got := MachinePoolByNodeName(tc.object) + g.Expect(got).To(ConsistOf(tc.expected)) + }) + } +} + +func TestIndexMachinePoolByProviderID(t *testing.T) { + validProviderID := "aws://region/zone/1" + otherValidProviderID := "aws://region/zone/2" + + testCases := []struct { + name string + object client.Object + expected []string + }{ + { + name: "MachinePool has no providerID", + object: &expv1.MachinePool{}, + expected: nil, + }, + { + name: "MachinePool has invalid providerID", + object: &expv1.MachinePool{ + Spec: expv1.MachinePoolSpec{ + ProviderIDList: []string{""}, + }, + }, + expected: []string{}, + }, + { + name: "MachinePool has valid providerIDs", + object: &expv1.MachinePool{ + Spec: expv1.MachinePoolSpec{ + ProviderIDList: []string{validProviderID, otherValidProviderID}, + }, + }, + expected: []string{validProviderID, otherValidProviderID}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + got := machinePoolByProviderID(tc.object) + g.Expect(got).To(BeEquivalentTo(tc.expected)) + }) + } +} diff --git a/api/v1beta1/index/node.go b/api/v1beta1/index/node.go index 6eed2de6545f..64fa4af29065 100644 --- a/api/v1beta1/index/node.go +++ b/api/v1beta1/index/node.go @@ -21,8 +21,6 @@ import ( corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" - - "sigs.k8s.io/cluster-api/controllers/noderefutil" ) const ( @@ -42,10 +40,5 @@ func NodeByProviderID(o client.Object) []string { return nil } - providerID, err := noderefutil.NewProviderID(node.Spec.ProviderID) - if err != nil { - // Failed to create providerID, skipping. - return nil - } - return []string{providerID.IndexKey()} + return []string{node.Spec.ProviderID} } diff --git a/api/v1beta1/index/node_test.go b/api/v1beta1/index/node_test.go index 8137c3f99026..a0e69d536bea 100644 --- a/api/v1beta1/index/node_test.go +++ b/api/v1beta1/index/node_test.go @@ -22,14 +22,10 @@ import ( . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" - - "sigs.k8s.io/cluster-api/controllers/noderefutil" ) func TestIndexNodeByProviderID(t *testing.T) { - validProviderID, err := noderefutil.NewProviderID("aws://region/zone/id") - g := NewWithT(t) - g.Expect(err).ToNot(HaveOccurred()) + validProviderID := "aws://region/zone/id" testCases := []struct { name string @@ -45,7 +41,7 @@ func TestIndexNodeByProviderID(t *testing.T) { name: "Node has invalid providerID", object: &corev1.Node{ Spec: corev1.NodeSpec{ - ProviderID: "invalid", + ProviderID: "", }, }, expected: nil, @@ -54,10 +50,10 @@ func TestIndexNodeByProviderID(t *testing.T) { name: "Node has valid providerID", object: &corev1.Node{ Spec: corev1.NodeSpec{ - ProviderID: validProviderID.String(), + ProviderID: validProviderID, }, }, - expected: []string{validProviderID.IndexKey()}, + expected: []string{validProviderID}, }, } diff --git a/api/v1beta1/machine_types.go b/api/v1beta1/machine_types.go index 21bfa548f652..e6e0fa8fe0cf 100644 --- a/api/v1beta1/machine_types.go +++ b/api/v1beta1/machine_types.go @@ -43,6 +43,10 @@ const ( // MachineDeploymentNameLabel is the label set on machines if they're controlled by MachineDeployment. MachineDeploymentNameLabel = "cluster.x-k8s.io/deployment-name" + // MachinePoolNameLabel is the label indicating the name of the MachinePool a Machine is controlled by. + // Note: The value of this label may be a hash if the MachinePool name is longer than 63 characters. + MachinePoolNameLabel = "cluster.x-k8s.io/pool-name" + // MachineControlPlaneNameLabel is the label set on machines if they're controlled by a ControlPlane. // Note: The value of this label may be a hash if the control plane name is longer than 63 characters. MachineControlPlaneNameLabel = "cluster.x-k8s.io/control-plane-name" @@ -303,5 +307,5 @@ type MachineList struct { } func init() { - SchemeBuilder.Register(&Machine{}, &MachineList{}) + objectTypes = append(objectTypes, &Machine{}, &MachineList{}) } diff --git a/api/v1beta1/machinedeployment_types.go b/api/v1beta1/machinedeployment_types.go index 1ada91dc42ec..13a023d07a63 100644 --- a/api/v1beta1/machinedeployment_types.go +++ b/api/v1beta1/machinedeployment_types.go @@ -66,6 +66,8 @@ const ( // As a result, we use the hash of the machine template while ignoring all in-place mutable fields, i.e. the // machine template with only fields that could trigger a rollout for the machine-template-hash, making it // independent of the changes to any in-place mutable fields. + // A random string is appended at the end of the label value (label value format is "-")) + // to distinguish duplicate MachineSets that have the exact same spec but were created as a result of rolloutAfter. MachineDeploymentUniqueLabel = "machine-template-hash" ) @@ -97,6 +99,15 @@ type MachineDeploymentSpec struct { // +optional Replicas *int32 `json:"replicas,omitempty"` + // RolloutAfter is a field to indicate a rollout should be performed + // after the specified time even if no changes have been made to the + // MachineDeployment. + // Example: In the YAML the time can be specified in the RFC3339 format. + // To specify the rolloutAfter target as March 9, 2023, at 9 am UTC + // use "2023-03-09T09:00:00Z". + // +optional + RolloutAfter *metav1.Time `json:"rolloutAfter,omitempty"` + // Label selector for machines. Existing MachineSets whose machines are // selected by this will be the ones affected by this deployment. // It must match the machine template's labels. 
@@ -110,10 +121,8 @@ type MachineDeploymentSpec struct { // +optional Strategy *MachineDeploymentStrategy `json:"strategy,omitempty"` - // Minimum number of seconds for which a newly created machine should - // be ready. - // Defaults to 0 (machine will be considered available as soon as it - // is ready) + // MinReadySeconds is the minimum number of seconds for which a Node for a newly created machine should be ready before considering the replica available. + // Defaults to 0 (machine will be considered available as soon as the Node is ready) // +optional MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` @@ -143,8 +152,8 @@ type MachineDeploymentSpec struct { // MachineDeploymentStrategy describes how to replace existing machines // with new ones. type MachineDeploymentStrategy struct { - // Type of deployment. - // Default is RollingUpdate. + // Type of deployment. Allowed values are RollingUpdate and OnDelete. + // The default is RollingUpdate. // +kubebuilder:validation:Enum=RollingUpdate;OnDelete // +optional Type MachineDeploymentStrategyType `json:"type,omitempty"` @@ -328,7 +337,7 @@ type MachineDeploymentList struct { } func init() { - SchemeBuilder.Register(&MachineDeployment{}, &MachineDeploymentList{}) + objectTypes = append(objectTypes, &MachineDeployment{}, &MachineDeploymentList{}) } // GetConditions returns the set of conditions for the machinedeployment. diff --git a/api/v1beta1/machinehealthcheck_types.go b/api/v1beta1/machinehealthcheck_types.go index a8d120351861..d793819febab 100644 --- a/api/v1beta1/machinehealthcheck_types.go +++ b/api/v1beta1/machinehealthcheck_types.go @@ -17,11 +17,21 @@ limitations under the License. package v1beta1 import ( + "time" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" ) +var ( + // DefaultNodeStartupTimeout is the time allowed for a node to start up. + // Can be made longer as part of spec if required for particular provider. + // 10 minutes should allow the instance to start and the node to join the + // cluster on most providers. + DefaultNodeStartupTimeout = metav1.Duration{Duration: 10 * time.Minute} +) + // ANCHOR: MachineHealthCheckSpec // MachineHealthCheckSpec defines the desired state of MachineHealthCheck. @@ -37,8 +47,8 @@ type MachineHealthCheckSpec struct { // whether a node is considered unhealthy. The conditions are combined in a // logical OR, i.e. if any of the conditions is met, the node is unhealthy. // - // +kubebuilder:validation:MinItems=1 - UnhealthyConditions []UnhealthyCondition `json:"unhealthyConditions"` + // +optional + UnhealthyConditions []UnhealthyCondition `json:"unhealthyConditions,omitempty"` // Any further remediation is only allowed if at most "MaxUnhealthy" machines selected by // "selector" are not healthy. @@ -169,5 +179,5 @@ type MachineHealthCheckList struct { } func init() { - SchemeBuilder.Register(&MachineHealthCheck{}, &MachineHealthCheckList{}) + objectTypes = append(objectTypes, &MachineHealthCheck{}, &MachineHealthCheckList{}) } diff --git a/api/v1beta1/machineset_types.go b/api/v1beta1/machineset_types.go index 4c4710fb79c1..f10e44f4f28d 100644 --- a/api/v1beta1/machineset_types.go +++ b/api/v1beta1/machineset_types.go @@ -41,13 +41,26 @@ type MachineSetSpec struct { // Replicas is the number of desired replicas. // This is a pointer to distinguish between explicit zero and unspecified. - // Defaults to 1. 
+ // + // Defaults to: + // * if the Kubernetes autoscaler min size and max size annotations are set: + // - if it's a new MachineSet, use min size + // - if the replicas field of the old MachineSet is < min size, use min size + // - if the replicas field of the old MachineSet is > max size, use max size + // - if the replicas field of the old MachineSet is in the (min size, max size) range, keep the value from the oldMS + // * otherwise use 1 + // Note: Defaulting will be run whenever the replicas field is not set: + // * A new MachineSet is created with replicas not set. + // * On an existing MachineSet the replicas field was first set and is now unset. + // Those cases are especially relevant for the following Kubernetes autoscaler use cases: + // * A new MachineSet is created and replicas should be managed by the autoscaler + // * An existing MachineSet which initially wasn't controlled by the autoscaler + // should be later controlled by the autoscaler // +optional - // +kubebuilder:default=1 Replicas *int32 `json:"replicas,omitempty"` - // MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready. - // Defaults to 0 (machine will be considered available as soon as it is ready) + // MinReadySeconds is the minimum number of seconds for which a Node for a newly created machine should be ready before considering the replica available. + // Defaults to 0 (machine will be considered available as soon as the Node is ready) // +optional MinReadySeconds int32 `json:"minReadySeconds,omitempty"` @@ -240,5 +253,5 @@ type MachineSetList struct { } func init() { - SchemeBuilder.Register(&MachineSet{}, &MachineSetList{}) + objectTypes = append(objectTypes, &MachineSet{}, &MachineSetList{}) } diff --git a/api/v1beta1/machineset_webhook.go b/api/v1beta1/machineset_webhook.go deleted file mode 100644 index 75dbfe9d34b8..000000000000 --- a/api/v1beta1/machineset_webhook.go +++ /dev/null @@ -1,150 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "fmt" - "strings" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/validation/field" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/webhook" - - capilabels "sigs.k8s.io/cluster-api/internal/labels" - "sigs.k8s.io/cluster-api/util/version" -) - -func (m *MachineSet) SetupWebhookWithManager(mgr ctrl.Manager) error { - return ctrl.NewWebhookManagedBy(mgr). - For(m). 
- Complete() -} - -// +kubebuilder:webhook:verbs=create;update,path=/validate-cluster-x-k8s-io-v1beta1-machineset,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=cluster.x-k8s.io,resources=machinesets,versions=v1beta1,name=validation.machineset.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 -// +kubebuilder:webhook:verbs=create;update,path=/mutate-cluster-x-k8s-io-v1beta1-machineset,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=cluster.x-k8s.io,resources=machinesets,versions=v1beta1,name=default.machineset.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 - -var _ webhook.Defaulter = &MachineSet{} -var _ webhook.Validator = &MachineSet{} - -// Default sets default MachineSet field values. -func (m *MachineSet) Default() { - if m.Labels == nil { - m.Labels = make(map[string]string) - } - m.Labels[ClusterNameLabel] = m.Spec.ClusterName - - if m.Spec.DeletePolicy == "" { - randomPolicy := string(RandomMachineSetDeletePolicy) - m.Spec.DeletePolicy = randomPolicy - } - - if m.Spec.Selector.MatchLabels == nil { - m.Spec.Selector.MatchLabels = make(map[string]string) - } - - if m.Spec.Template.Labels == nil { - m.Spec.Template.Labels = make(map[string]string) - } - - if len(m.Spec.Selector.MatchLabels) == 0 && len(m.Spec.Selector.MatchExpressions) == 0 { - // Note: MustFormatValue is used here as the value of this label will be a hash if the MachineSet name is longer than 63 characters. - m.Spec.Selector.MatchLabels[MachineSetNameLabel] = capilabels.MustFormatValue(m.Name) - m.Spec.Template.Labels[MachineSetNameLabel] = capilabels.MustFormatValue(m.Name) - } - - if m.Spec.Template.Spec.Version != nil && !strings.HasPrefix(*m.Spec.Template.Spec.Version, "v") { - normalizedVersion := "v" + *m.Spec.Template.Spec.Version - m.Spec.Template.Spec.Version = &normalizedVersion - } -} - -// ValidateCreate implements webhook.Validator so a webhook will be registered for the type. -func (m *MachineSet) ValidateCreate() error { - return m.validate(nil) -} - -// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. -func (m *MachineSet) ValidateUpdate(old runtime.Object) error { - oldMS, ok := old.(*MachineSet) - if !ok { - return apierrors.NewBadRequest(fmt.Sprintf("expected a MachineSet but got a %T", old)) - } - return m.validate(oldMS) -} - -// ValidateDelete implements webhook.Validator so a webhook will be registered for the type. 
-func (m *MachineSet) ValidateDelete() error { - return nil -} - -func (m *MachineSet) validate(old *MachineSet) error { - var allErrs field.ErrorList - specPath := field.NewPath("spec") - selector, err := metav1.LabelSelectorAsSelector(&m.Spec.Selector) - if err != nil { - allErrs = append( - allErrs, - field.Invalid( - specPath.Child("selector"), - m.Spec.Selector, - err.Error(), - ), - ) - } else if !selector.Matches(labels.Set(m.Spec.Template.Labels)) { - allErrs = append( - allErrs, - field.Invalid( - specPath.Child("template", "metadata", "labels"), - m.Spec.Template.ObjectMeta.Labels, - fmt.Sprintf("must match spec.selector %q", selector.String()), - ), - ) - } - - if old != nil && old.Spec.ClusterName != m.Spec.ClusterName { - allErrs = append( - allErrs, - field.Forbidden( - specPath.Child("clusterName"), - "field is immutable", - ), - ) - } - - if m.Spec.Template.Spec.Version != nil { - if !version.KubeSemver.MatchString(*m.Spec.Template.Spec.Version) { - allErrs = append( - allErrs, - field.Invalid( - specPath.Child("template", "spec", "version"), - *m.Spec.Template.Spec.Version, - "must be a valid semantic version", - ), - ) - } - } - - if len(allErrs) == 0 { - return nil - } - - return apierrors.NewInvalid(GroupVersion.WithKind("MachineSet").GroupKind(), m.Name, allErrs) -} diff --git a/api/v1beta1/machineset_webhook_test.go b/api/v1beta1/machineset_webhook_test.go deleted file mode 100644 index 43fd79a5f31b..000000000000 --- a/api/v1beta1/machineset_webhook_test.go +++ /dev/null @@ -1,220 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "testing" - - . 
"github.com/onsi/gomega" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" - - utildefaulting "sigs.k8s.io/cluster-api/util/defaulting" -) - -func TestMachineSetDefault(t *testing.T) { - g := NewWithT(t) - ms := &MachineSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-ms", - }, - Spec: MachineSetSpec{ - Template: MachineTemplateSpec{ - Spec: MachineSpec{ - Version: pointer.String("1.19.10"), - }, - }, - }, - } - t.Run("for MachineSet", utildefaulting.DefaultValidateTest(ms)) - ms.Default() - - g.Expect(ms.Labels[ClusterNameLabel]).To(Equal(ms.Spec.ClusterName)) - g.Expect(ms.Spec.DeletePolicy).To(Equal(string(RandomMachineSetDeletePolicy))) - g.Expect(ms.Spec.Selector.MatchLabels).To(HaveKeyWithValue(MachineSetNameLabel, "test-ms")) - g.Expect(ms.Spec.Template.Labels).To(HaveKeyWithValue(MachineSetNameLabel, "test-ms")) - g.Expect(*ms.Spec.Template.Spec.Version).To(Equal("v1.19.10")) -} - -func TestMachineSetLabelSelectorMatchValidation(t *testing.T) { - tests := []struct { - name string - selectors map[string]string - labels map[string]string - expectErr bool - }{ - { - name: "should return error on mismatch", - selectors: map[string]string{"foo": "bar"}, - labels: map[string]string{"foo": "baz"}, - expectErr: true, - }, - { - name: "should return error on missing labels", - selectors: map[string]string{"foo": "bar"}, - labels: map[string]string{"": ""}, - expectErr: true, - }, - { - name: "should return error if all selectors don't match", - selectors: map[string]string{"foo": "bar", "hello": "world"}, - labels: map[string]string{"foo": "bar"}, - expectErr: true, - }, - { - name: "should not return error on match", - selectors: map[string]string{"foo": "bar"}, - labels: map[string]string{"foo": "bar"}, - expectErr: false, - }, - { - name: "should return error for invalid selector", - selectors: map[string]string{"-123-foo": "bar"}, - labels: map[string]string{"-123-foo": "bar"}, - expectErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - ms := &MachineSet{ - Spec: MachineSetSpec{ - Selector: metav1.LabelSelector{ - MatchLabels: tt.selectors, - }, - Template: MachineTemplateSpec{ - ObjectMeta: ObjectMeta{ - Labels: tt.labels, - }, - }, - }, - } - if tt.expectErr { - g.Expect(ms.ValidateCreate()).NotTo(Succeed()) - g.Expect(ms.ValidateUpdate(ms)).NotTo(Succeed()) - } else { - g.Expect(ms.ValidateCreate()).To(Succeed()) - g.Expect(ms.ValidateUpdate(ms)).To(Succeed()) - } - }) - } -} - -func TestMachineSetClusterNameImmutable(t *testing.T) { - tests := []struct { - name string - oldClusterName string - newClusterName string - expectErr bool - }{ - { - name: "when the cluster name has not changed", - oldClusterName: "foo", - newClusterName: "foo", - expectErr: false, - }, - { - name: "when the cluster name has changed", - oldClusterName: "foo", - newClusterName: "bar", - expectErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - newMS := &MachineSet{ - Spec: MachineSetSpec{ - ClusterName: tt.newClusterName, - }, - } - - oldMS := &MachineSet{ - Spec: MachineSetSpec{ - ClusterName: tt.oldClusterName, - }, - } - - if tt.expectErr { - g.Expect(newMS.ValidateUpdate(oldMS)).NotTo(Succeed()) - } else { - g.Expect(newMS.ValidateUpdate(oldMS)).To(Succeed()) - } - }) - } -} - -func TestMachineSetVersionValidation(t *testing.T) { - tests := []struct { - name string - version string - expectErr bool - }{ - { - name: "should succeed when given a valid semantic 
version with prepended 'v'", - version: "v1.19.2", - expectErr: false, - }, - { - name: "should return error when given a valid semantic version without 'v'", - version: "1.19.2", - expectErr: true, - }, - { - name: "should return error when given an invalid semantic version", - version: "1", - expectErr: true, - }, - { - name: "should return error when given an invalid semantic version", - version: "v1", - expectErr: true, - }, - { - name: "should return error when given an invalid semantic version", - version: "wrong_version", - expectErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - md := &MachineSet{ - Spec: MachineSetSpec{ - Template: MachineTemplateSpec{ - Spec: MachineSpec{ - Version: pointer.String(tt.version), - }, - }, - }, - } - - if tt.expectErr { - g.Expect(md.ValidateCreate()).NotTo(Succeed()) - g.Expect(md.ValidateUpdate(md)).NotTo(Succeed()) - } else { - g.Expect(md.ValidateCreate()).To(Succeed()) - g.Expect(md.ValidateUpdate(md)).To(Succeed()) - } - }) - } -} diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index e4a3efc07402..cd8e2982e3d9 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* Copyright The Kubernetes Authors. @@ -274,6 +273,7 @@ func (in *ClusterClassStatusVariable) DeepCopy() *ClusterClassStatusVariable { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterClassStatusVariableDefinition) DeepCopyInto(out *ClusterClassStatusVariableDefinition) { *out = *in + in.Metadata.DeepCopyInto(&out.Metadata) in.Schema.DeepCopyInto(&out.Schema) } @@ -290,6 +290,7 @@ func (in *ClusterClassStatusVariableDefinition) DeepCopy() *ClusterClassStatusVa // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterClassVariable) DeepCopyInto(out *ClusterClassVariable) { *out = *in + in.Metadata.DeepCopyInto(&out.Metadata) in.Schema.DeepCopyInto(&out.Schema) } @@ -303,6 +304,35 @@ func (in *ClusterClassVariable) DeepCopy() *ClusterClassVariable { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterClassVariableMetadata) DeepCopyInto(out *ClusterClassVariableMetadata) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterClassVariableMetadata. +func (in *ClusterClassVariableMetadata) DeepCopy() *ClusterClassVariableMetadata { + if in == nil { + return nil + } + out := new(ClusterClassVariableMetadata) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ClusterList) DeepCopyInto(out *ClusterList) { *out = *in @@ -508,6 +538,11 @@ func (in *ControlPlaneClass) DeepCopyInto(out *ControlPlaneClass) { *out = new(MachineHealthCheckClass) (*in).DeepCopyInto(*out) } + if in.NamingStrategy != nil { + in, out := &in.NamingStrategy, &out.NamingStrategy + *out = new(ControlPlaneClassNamingStrategy) + (*in).DeepCopyInto(*out) + } if in.NodeDrainTimeout != nil { in, out := &in.NodeDrainTimeout, &out.NodeDrainTimeout *out = new(metav1.Duration) @@ -535,6 +570,26 @@ func (in *ControlPlaneClass) DeepCopy() *ControlPlaneClass { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneClassNamingStrategy) DeepCopyInto(out *ControlPlaneClassNamingStrategy) { + *out = *in + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneClassNamingStrategy. +func (in *ControlPlaneClassNamingStrategy) DeepCopy() *ControlPlaneClassNamingStrategy { + if in == nil { + return nil + } + out := new(ControlPlaneClassNamingStrategy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ControlPlaneTopology) DeepCopyInto(out *ControlPlaneTopology) { *out = *in @@ -912,6 +967,11 @@ func (in *MachineDeploymentClass) DeepCopyInto(out *MachineDeploymentClass) { *out = new(string) **out = **in } + if in.NamingStrategy != nil { + in, out := &in.NamingStrategy, &out.NamingStrategy + *out = new(MachineDeploymentClassNamingStrategy) + (*in).DeepCopyInto(*out) + } if in.NodeDrainTimeout != nil { in, out := &in.NodeDrainTimeout, &out.NodeDrainTimeout *out = new(metav1.Duration) @@ -949,6 +1009,26 @@ func (in *MachineDeploymentClass) DeepCopy() *MachineDeploymentClass { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineDeploymentClassNamingStrategy) DeepCopyInto(out *MachineDeploymentClassNamingStrategy) { + *out = *in + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentClassNamingStrategy. +func (in *MachineDeploymentClassNamingStrategy) DeepCopy() *MachineDeploymentClassNamingStrategy { + if in == nil { + return nil + } + out := new(MachineDeploymentClassNamingStrategy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MachineDeploymentClassTemplate) DeepCopyInto(out *MachineDeploymentClassTemplate) { *out = *in @@ -1007,6 +1087,10 @@ func (in *MachineDeploymentSpec) DeepCopyInto(out *MachineDeploymentSpec) { *out = new(int32) **out = **in } + if in.RolloutAfter != nil { + in, out := &in.RolloutAfter, &out.RolloutAfter + *out = (*in).DeepCopy() + } in.Selector.DeepCopyInto(&out.Selector) in.Template.DeepCopyInto(&out.Template) if in.Strategy != nil { @@ -1386,6 +1470,163 @@ func (in *MachineList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MachinePoolClass) DeepCopyInto(out *MachinePoolClass) { + *out = *in + in.Template.DeepCopyInto(&out.Template) + if in.FailureDomains != nil { + in, out := &in.FailureDomains, &out.FailureDomains + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NamingStrategy != nil { + in, out := &in.NamingStrategy, &out.NamingStrategy + *out = new(MachinePoolClassNamingStrategy) + (*in).DeepCopyInto(*out) + } + if in.NodeDrainTimeout != nil { + in, out := &in.NodeDrainTimeout, &out.NodeDrainTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.NodeVolumeDetachTimeout != nil { + in, out := &in.NodeVolumeDetachTimeout, &out.NodeVolumeDetachTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.NodeDeletionTimeout != nil { + in, out := &in.NodeDeletionTimeout, &out.NodeDeletionTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.MinReadySeconds != nil { + in, out := &in.MinReadySeconds, &out.MinReadySeconds + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePoolClass. +func (in *MachinePoolClass) DeepCopy() *MachinePoolClass { + if in == nil { + return nil + } + out := new(MachinePoolClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePoolClassNamingStrategy) DeepCopyInto(out *MachinePoolClassNamingStrategy) { + *out = *in + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePoolClassNamingStrategy. +func (in *MachinePoolClassNamingStrategy) DeepCopy() *MachinePoolClassNamingStrategy { + if in == nil { + return nil + } + out := new(MachinePoolClassNamingStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePoolClassTemplate) DeepCopyInto(out *MachinePoolClassTemplate) { + *out = *in + in.Metadata.DeepCopyInto(&out.Metadata) + in.Bootstrap.DeepCopyInto(&out.Bootstrap) + in.Infrastructure.DeepCopyInto(&out.Infrastructure) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePoolClassTemplate. +func (in *MachinePoolClassTemplate) DeepCopy() *MachinePoolClassTemplate { + if in == nil { + return nil + } + out := new(MachinePoolClassTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MachinePoolTopology) DeepCopyInto(out *MachinePoolTopology) { + *out = *in + in.Metadata.DeepCopyInto(&out.Metadata) + if in.FailureDomains != nil { + in, out := &in.FailureDomains, &out.FailureDomains + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NodeDrainTimeout != nil { + in, out := &in.NodeDrainTimeout, &out.NodeDrainTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.NodeVolumeDetachTimeout != nil { + in, out := &in.NodeVolumeDetachTimeout, &out.NodeVolumeDetachTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.NodeDeletionTimeout != nil { + in, out := &in.NodeDeletionTimeout, &out.NodeDeletionTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.MinReadySeconds != nil { + in, out := &in.MinReadySeconds, &out.MinReadySeconds + *out = new(int32) + **out = **in + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = new(MachinePoolVariables) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePoolTopology. +func (in *MachinePoolTopology) DeepCopy() *MachinePoolTopology { + if in == nil { + return nil + } + out := new(MachinePoolTopology) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePoolVariables) DeepCopyInto(out *MachinePoolVariables) { + *out = *in + if in.Overrides != nil { + in, out := &in.Overrides, &out.Overrides + *out = make([]ClusterVariable, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePoolVariables. +func (in *MachinePoolVariables) DeepCopy() *MachinePoolVariables { + if in == nil { + return nil + } + out := new(MachinePoolVariables) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MachineRollingUpdateDeployment) DeepCopyInto(out *MachineRollingUpdateDeployment) { *out = *in @@ -1744,6 +1985,11 @@ func (in *PatchSelectorMatch) DeepCopyInto(out *PatchSelectorMatch) { *out = new(PatchSelectorMatchMachineDeploymentClass) (*in).DeepCopyInto(*out) } + if in.MachinePoolClass != nil { + in, out := &in.MachinePoolClass, &out.MachinePoolClass + *out = new(PatchSelectorMatchMachinePoolClass) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchSelectorMatch. @@ -1776,6 +2022,26 @@ func (in *PatchSelectorMatchMachineDeploymentClass) DeepCopy() *PatchSelectorMat return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatchSelectorMatchMachinePoolClass) DeepCopyInto(out *PatchSelectorMatchMachinePoolClass) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchSelectorMatchMachinePoolClass. 
+func (in *PatchSelectorMatchMachinePoolClass) DeepCopy() *PatchSelectorMatchMachinePoolClass { + if in == nil { + return nil + } + out := new(PatchSelectorMatchMachinePoolClass) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Topology) DeepCopyInto(out *Topology) { *out = *in @@ -1850,6 +2116,13 @@ func (in *WorkersClass) DeepCopyInto(out *WorkersClass) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.MachinePools != nil { + in, out := &in.MachinePools, &out.MachinePools + *out = make([]MachinePoolClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkersClass. @@ -1872,6 +2145,13 @@ func (in *WorkersTopology) DeepCopyInto(out *WorkersTopology) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.MachinePools != nil { + in, out := &in.MachinePools, &out.MachinePools + *out = make([]MachinePoolTopology, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkersTopology. diff --git a/api/v1beta1/zz_generated.openapi.go b/api/v1beta1/zz_generated.openapi.go index 2b59d54b3395..6cd32864a7ad 100644 --- a/api/v1beta1/zz_generated.openapi.go +++ b/api/v1beta1/zz_generated.openapi.go @@ -19,8 +19,6 @@ limitations under the License. // Code generated by openapi-gen. DO NOT EDIT. -// This file was autogenerated by openapi-gen. Do not edit it manually! - package v1beta1 import ( @@ -41,6 +39,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "sigs.k8s.io/cluster-api/api/v1beta1.ClusterClassStatusVariable": schema_sigsk8sio_cluster_api_api_v1beta1_ClusterClassStatusVariable(ref), "sigs.k8s.io/cluster-api/api/v1beta1.ClusterClassStatusVariableDefinition": schema_sigsk8sio_cluster_api_api_v1beta1_ClusterClassStatusVariableDefinition(ref), "sigs.k8s.io/cluster-api/api/v1beta1.ClusterClassVariable": schema_sigsk8sio_cluster_api_api_v1beta1_ClusterClassVariable(ref), + "sigs.k8s.io/cluster-api/api/v1beta1.ClusterClassVariableMetadata": schema_sigsk8sio_cluster_api_api_v1beta1_ClusterClassVariableMetadata(ref), "sigs.k8s.io/cluster-api/api/v1beta1.ClusterList": schema_sigsk8sio_cluster_api_api_v1beta1_ClusterList(ref), "sigs.k8s.io/cluster-api/api/v1beta1.ClusterNetwork": schema_sigsk8sio_cluster_api_api_v1beta1_ClusterNetwork(ref), "sigs.k8s.io/cluster-api/api/v1beta1.ClusterSpec": schema_sigsk8sio_cluster_api_api_v1beta1_ClusterSpec(ref), @@ -48,6 +47,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "sigs.k8s.io/cluster-api/api/v1beta1.ClusterVariable": schema_sigsk8sio_cluster_api_api_v1beta1_ClusterVariable(ref), "sigs.k8s.io/cluster-api/api/v1beta1.Condition": schema_sigsk8sio_cluster_api_api_v1beta1_Condition(ref), "sigs.k8s.io/cluster-api/api/v1beta1.ControlPlaneClass": schema_sigsk8sio_cluster_api_api_v1beta1_ControlPlaneClass(ref), + "sigs.k8s.io/cluster-api/api/v1beta1.ControlPlaneClassNamingStrategy": schema_sigsk8sio_cluster_api_api_v1beta1_ControlPlaneClassNamingStrategy(ref), "sigs.k8s.io/cluster-api/api/v1beta1.ControlPlaneTopology": schema_sigsk8sio_cluster_api_api_v1beta1_ControlPlaneTopology(ref), "sigs.k8s.io/cluster-api/api/v1beta1.ExternalPatchDefinition": schema_sigsk8sio_cluster_api_api_v1beta1_ExternalPatchDefinition(ref), 
"sigs.k8s.io/cluster-api/api/v1beta1.FailureDomainSpec": schema_sigsk8sio_cluster_api_api_v1beta1_FailureDomainSpec(ref), @@ -59,6 +59,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "sigs.k8s.io/cluster-api/api/v1beta1.MachineAddress": schema_sigsk8sio_cluster_api_api_v1beta1_MachineAddress(ref), "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeployment": schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeployment(ref), "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentClass": schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeploymentClass(ref), + "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentClassNamingStrategy": schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeploymentClassNamingStrategy(ref), "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentClassTemplate": schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeploymentClassTemplate(ref), "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentList": schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeploymentList(ref), "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentSpec": schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeploymentSpec(ref), @@ -73,6 +74,11 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "sigs.k8s.io/cluster-api/api/v1beta1.MachineHealthCheckStatus": schema_sigsk8sio_cluster_api_api_v1beta1_MachineHealthCheckStatus(ref), "sigs.k8s.io/cluster-api/api/v1beta1.MachineHealthCheckTopology": schema_sigsk8sio_cluster_api_api_v1beta1_MachineHealthCheckTopology(ref), "sigs.k8s.io/cluster-api/api/v1beta1.MachineList": schema_sigsk8sio_cluster_api_api_v1beta1_MachineList(ref), + "sigs.k8s.io/cluster-api/api/v1beta1.MachinePoolClass": schema_sigsk8sio_cluster_api_api_v1beta1_MachinePoolClass(ref), + "sigs.k8s.io/cluster-api/api/v1beta1.MachinePoolClassNamingStrategy": schema_sigsk8sio_cluster_api_api_v1beta1_MachinePoolClassNamingStrategy(ref), + "sigs.k8s.io/cluster-api/api/v1beta1.MachinePoolClassTemplate": schema_sigsk8sio_cluster_api_api_v1beta1_MachinePoolClassTemplate(ref), + "sigs.k8s.io/cluster-api/api/v1beta1.MachinePoolTopology": schema_sigsk8sio_cluster_api_api_v1beta1_MachinePoolTopology(ref), + "sigs.k8s.io/cluster-api/api/v1beta1.MachinePoolVariables": schema_sigsk8sio_cluster_api_api_v1beta1_MachinePoolVariables(ref), "sigs.k8s.io/cluster-api/api/v1beta1.MachineRollingUpdateDeployment": schema_sigsk8sio_cluster_api_api_v1beta1_MachineRollingUpdateDeployment(ref), "sigs.k8s.io/cluster-api/api/v1beta1.MachineSet": schema_sigsk8sio_cluster_api_api_v1beta1_MachineSet(ref), "sigs.k8s.io/cluster-api/api/v1beta1.MachineSetList": schema_sigsk8sio_cluster_api_api_v1beta1_MachineSetList(ref), @@ -87,12 +93,12 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "sigs.k8s.io/cluster-api/api/v1beta1.PatchSelector": schema_sigsk8sio_cluster_api_api_v1beta1_PatchSelector(ref), "sigs.k8s.io/cluster-api/api/v1beta1.PatchSelectorMatch": schema_sigsk8sio_cluster_api_api_v1beta1_PatchSelectorMatch(ref), "sigs.k8s.io/cluster-api/api/v1beta1.PatchSelectorMatchMachineDeploymentClass": schema_sigsk8sio_cluster_api_api_v1beta1_PatchSelectorMatchMachineDeploymentClass(ref), + "sigs.k8s.io/cluster-api/api/v1beta1.PatchSelectorMatchMachinePoolClass": schema_sigsk8sio_cluster_api_api_v1beta1_PatchSelectorMatchMachinePoolClass(ref), "sigs.k8s.io/cluster-api/api/v1beta1.Topology": schema_sigsk8sio_cluster_api_api_v1beta1_Topology(ref), "sigs.k8s.io/cluster-api/api/v1beta1.UnhealthyCondition": 
schema_sigsk8sio_cluster_api_api_v1beta1_UnhealthyCondition(ref), "sigs.k8s.io/cluster-api/api/v1beta1.VariableSchema": schema_sigsk8sio_cluster_api_api_v1beta1_VariableSchema(ref), "sigs.k8s.io/cluster-api/api/v1beta1.WorkersClass": schema_sigsk8sio_cluster_api_api_v1beta1_WorkersClass(ref), "sigs.k8s.io/cluster-api/api/v1beta1.WorkersTopology": schema_sigsk8sio_cluster_api_api_v1beta1_WorkersTopology(ref), - "sigs.k8s.io/cluster-api/api/v1beta1.machineDeploymentDefaulter": schema_sigsk8sio_cluster_api_api_v1beta1_machineDeploymentDefaulter(ref), } } @@ -538,6 +544,13 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_ClusterClassStatusVariableDefiniti Format: "", }, }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Metadata is the metadata of a variable. It can be used to add additional data for higher level tools to a ClusterClassVariable.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.ClusterClassVariableMetadata"), + }, + }, "schema": { SchemaProps: spec.SchemaProps{ Description: "Schema defines the schema of the variable.", @@ -550,7 +563,7 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_ClusterClassStatusVariableDefiniti }, }, Dependencies: []string{ - "sigs.k8s.io/cluster-api/api/v1beta1.VariableSchema"}, + "sigs.k8s.io/cluster-api/api/v1beta1.ClusterClassVariableMetadata", "sigs.k8s.io/cluster-api/api/v1beta1.VariableSchema"}, } } @@ -577,6 +590,13 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_ClusterClassVariable(ref common.Re Format: "", }, }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Metadata is the metadata of a variable. It can be used to add additional data for higher level tools to a ClusterClassVariable.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.ClusterClassVariableMetadata"), + }, + }, "schema": { SchemaProps: spec.SchemaProps{ Description: "Schema defines the schema of the variable.", @@ -589,7 +609,52 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_ClusterClassVariable(ref common.Re }, }, Dependencies: []string{ - "sigs.k8s.io/cluster-api/api/v1beta1.VariableSchema"}, + "sigs.k8s.io/cluster-api/api/v1beta1.ClusterClassVariableMetadata", "sigs.k8s.io/cluster-api/api/v1beta1.VariableSchema"}, + } +} + +func schema_sigsk8sio_cluster_api_api_v1beta1_ClusterClassVariableMetadata(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ClusterClassVariableMetadata is the metadata of a variable. It can be used to add additional data for higher level tools to a ClusterClassVariable.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "labels": { + SchemaProps: spec.SchemaProps{ + Description: "Map of string keys and values that can be used to organize and categorize (scope and select) variables.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "annotations": { + SchemaProps: spec.SchemaProps{ + Description: "Annotations is an unstructured key value map that can be used to store and retrieve arbitrary metadata. 
They are not queryable.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, } } @@ -789,7 +854,7 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_ClusterStatus(ref common.Reference }, "controlPlaneReady": { SchemaProps: spec.SchemaProps{ - Description: "ControlPlaneReady defines if the control plane is ready.", + Description: "ControlPlaneReady denotes if the control plane became ready during initial provisioning to receive requests. NOTE: this field is part of the Cluster API contract and it is used to orchestrate provisioning. The value of this field is never updated after provisioning is completed. Please use conditions to check the operational state of the control plane.", Default: false, Type: []string{"boolean"}, Format: "", @@ -849,7 +914,6 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_ClusterVariable(ref common.Referen "value": { SchemaProps: spec.SchemaProps{ Description: "Value of the variable. Note: the value will be validated against the schema of the corresponding ClusterClassVariable from the ClusterClass. Note: We have to use apiextensionsv1.JSON instead of a custom JSON type, because controller-tools has a hard-coded schema for apiextensionsv1.JSON which cannot be produced by another type via controller-tools, i.e. it is not possible to have no type field. Ref: https://github.com/kubernetes-sigs/controller-tools/blob/d0e03a142d0ecdd5491593e941ee1d6b5d91dba6/pkg/crd/known_types.go#L106-L111", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON"), }, }, @@ -895,7 +959,6 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_Condition(ref common.ReferenceCall "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -954,6 +1017,12 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_ControlPlaneClass(ref common.Refer Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.MachineHealthCheckClass"), }, }, + "namingStrategy": { + SchemaProps: spec.SchemaProps{ + Description: "NamingStrategy allows changing the naming pattern used when creating the control plane provider object.", + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.ControlPlaneClassNamingStrategy"), + }, + }, "nodeDrainTimeout": { SchemaProps: spec.SchemaProps{ Description: "NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. 
NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` NOTE: This value can be overridden while defining a Cluster.Topology.", @@ -977,7 +1046,27 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_ControlPlaneClass(ref common.Refer }, }, Dependencies: []string{ - "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "sigs.k8s.io/cluster-api/api/v1beta1.LocalObjectTemplate", "sigs.k8s.io/cluster-api/api/v1beta1.MachineHealthCheckClass", "sigs.k8s.io/cluster-api/api/v1beta1.ObjectMeta"}, + "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "sigs.k8s.io/cluster-api/api/v1beta1.ControlPlaneClassNamingStrategy", "sigs.k8s.io/cluster-api/api/v1beta1.LocalObjectTemplate", "sigs.k8s.io/cluster-api/api/v1beta1.MachineHealthCheckClass", "sigs.k8s.io/cluster-api/api/v1beta1.ObjectMeta"}, + } +} + +func schema_sigsk8sio_cluster_api_api_v1beta1_ControlPlaneClassNamingStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ControlPlaneClassNamingStrategy defines the naming strategy for control plane objects.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "template": { + SchemaProps: spec.SchemaProps{ + Description: "Template defines the template to use for generating the name of the ControlPlane object. If not defined, it will fallback to `{{ .cluster.name }}-{{ .random }}`. If the templated string exceeds 63 characters, it will be trimmed to 58 characters and will get concatenated with a random suffix of length 5. The templating mechanism provides the following arguments: * `.cluster.name`: The name of the cluster object. * `.random`: A random alphanumeric string, without vowels, of length 5.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, } } @@ -1352,8 +1441,7 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_JSONSchemaProps(ref common.Referen Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON"), + Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON"), }, }, }, @@ -1555,6 +1643,12 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeploymentClass(ref common. Format: "", }, }, + "namingStrategy": { + SchemaProps: spec.SchemaProps{ + Description: "NamingStrategy allows changing the naming pattern used when creating the MachineDeployment.", + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentClassNamingStrategy"), + }, + }, "nodeDrainTimeout": { SchemaProps: spec.SchemaProps{ Description: "NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` NOTE: This value can be overridden while defining a Cluster.Topology using this MachineDeploymentClass.", @@ -1591,7 +1685,27 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeploymentClass(ref common. 
}, }, Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentClassTemplate", "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentStrategy", "sigs.k8s.io/cluster-api/api/v1beta1.MachineHealthCheckClass"}, + "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentClassNamingStrategy", "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentClassTemplate", "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentStrategy", "sigs.k8s.io/cluster-api/api/v1beta1.MachineHealthCheckClass"}, + } +} + +func schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeploymentClassNamingStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "MachineDeploymentClassNamingStrategy defines the naming strategy for machine deployment objects.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "template": { + SchemaProps: spec.SchemaProps{ + Description: "Template defines the template to use for generating the name of the MachineDeployment object. If not defined, it will fallback to `{{ .cluster.name }}-{{ .machineDeployment.topologyName }}-{{ .random }}`. If the templated string exceeds 63 characters, it will be trimmed to 58 characters and will get concatenated with a random suffix of length 5. The templating mechanism provides the following arguments: * `.cluster.name`: The name of the cluster object. * `.random`: A random alphanumeric string, without vowels, of length 5. * `.machineDeployment.topologyName`: The name of the MachineDeployment topology (Cluster.spec.topology.workers.machineDeployments[].name).", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, } } @@ -1703,6 +1817,12 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeploymentSpec(ref common.R Format: "int32", }, }, + "rolloutAfter": { + SchemaProps: spec.SchemaProps{ + Description: "RolloutAfter is a field to indicate a rollout should be performed after the specified time even if no changes have been made to the MachineDeployment. Example: In the YAML the time can be specified in the RFC3339 format. To specify the rolloutAfter target as March 9, 2023, at 9 am UTC use \"2023-03-09T09:00:00Z\".", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, "selector": { SchemaProps: spec.SchemaProps{ Description: "Label selector for machines. Existing MachineSets whose machines are selected by this will be the ones affected by this deployment. It must match the machine template's labels.", @@ -1725,7 +1845,7 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeploymentSpec(ref common.R }, "minReadySeconds": { SchemaProps: spec.SchemaProps{ - Description: "Minimum number of seconds for which a newly created machine should be ready. Defaults to 0 (machine will be considered available as soon as it is ready)", + Description: "MinReadySeconds is the minimum number of seconds for which a Node for a newly created machine should be ready before considering the replica available. 
Defaults to 0 (machine will be considered available as soon as the Node is ready)", Type: []string{"integer"}, Format: "int32", }, @@ -1756,7 +1876,7 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeploymentSpec(ref common.R }, }, Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector", "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentStrategy", "sigs.k8s.io/cluster-api/api/v1beta1.MachineTemplateSpec"}, + "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector", "k8s.io/apimachinery/pkg/apis/meta/v1.Time", "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentStrategy", "sigs.k8s.io/cluster-api/api/v1beta1.MachineTemplateSpec"}, } } @@ -1859,7 +1979,7 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeploymentStrategy(ref comm Properties: map[string]spec.Schema{ "type": { SchemaProps: spec.SchemaProps{ - Description: "Type of deployment. Default is RollingUpdate.", + Description: "Type of deployment. Allowed values are RollingUpdate and OnDelete. The default is RollingUpdate.", Type: []string{"string"}, Format: "", }, @@ -2217,7 +2337,7 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_MachineHealthCheckSpec(ref common. }, }, }, - Required: []string{"clusterName", "selector", "unhealthyConditions"}, + Required: []string{"clusterName", "selector"}, }, }, Dependencies: []string{ @@ -2410,6 +2530,261 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_MachineList(ref common.ReferenceCa } } +func schema_sigsk8sio_cluster_api_api_v1beta1_MachinePoolClass(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "MachinePoolClass serves as a template to define a pool of worker nodes of the cluster provisioned using `ClusterClass`.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "class": { + SchemaProps: spec.SchemaProps{ + Description: "Class denotes a type of machine pool present in the cluster, this name MUST be unique within a ClusterClass and can be referenced in the Cluster to create a managed MachinePool.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "template": { + SchemaProps: spec.SchemaProps{ + Description: "Template is a local struct containing a collection of templates for creation of MachinePools objects representing a pool of worker nodes.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.MachinePoolClassTemplate"), + }, + }, + "failureDomains": { + SchemaProps: spec.SchemaProps{ + Description: "FailureDomains is the list of failure domains the MachinePool should be attached to. Must match a key in the FailureDomains map stored on the cluster object. NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "namingStrategy": { + SchemaProps: spec.SchemaProps{ + Description: "NamingStrategy allows changing the naming pattern used when creating the MachinePool.", + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.MachinePoolClassNamingStrategy"), + }, + }, + "nodeDrainTimeout": { + SchemaProps: spec.SchemaProps{ + Description: "NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. 
NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "nodeVolumeDetachTimeout": { + SchemaProps: spec.SchemaProps{ + Description: "NodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "nodeDeletionTimeout": { + SchemaProps: spec.SchemaProps{ + Description: "NodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine hosts after the Machine Pool is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds. NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "minReadySeconds": { + SchemaProps: spec.SchemaProps{ + Description: "Minimum number of seconds for which a newly created machine pool should be ready. Defaults to 0 (machine will be considered available as soon as it is ready) NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + Required: []string{"class", "template"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "sigs.k8s.io/cluster-api/api/v1beta1.MachinePoolClassNamingStrategy", "sigs.k8s.io/cluster-api/api/v1beta1.MachinePoolClassTemplate"}, + } +} + +func schema_sigsk8sio_cluster_api_api_v1beta1_MachinePoolClassNamingStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "MachinePoolClassNamingStrategy defines the naming strategy for machine pool objects.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "template": { + SchemaProps: spec.SchemaProps{ + Description: "Template defines the template to use for generating the name of the MachinePool object. If not defined, it will fallback to `{{ .cluster.name }}-{{ .machinePool.topologyName }}-{{ .random }}`. If the templated string exceeds 63 characters, it will be trimmed to 58 characters and will get concatenated with a random suffix of length 5. The templating mechanism provides the following arguments: * `.cluster.name`: The name of the cluster object. * `.random`: A random alphanumeric string, without vowels, of length 5. * `.machinePool.topologyName`: The name of the MachinePool topology (Cluster.spec.topology.workers.machinePools[].name).", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_sigsk8sio_cluster_api_api_v1beta1_MachinePoolClassTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "MachinePoolClassTemplate defines how a MachinePool generated from a MachinePoolClass should look like.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Metadata is the metadata applied to the MachinePool. 
At runtime this metadata is merged with the corresponding metadata from the topology.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.ObjectMeta"), + }, + }, + "bootstrap": { + SchemaProps: spec.SchemaProps{ + Description: "Bootstrap contains the bootstrap template reference to be used for the creation of the Machines in the MachinePool.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.LocalObjectTemplate"), + }, + }, + "infrastructure": { + SchemaProps: spec.SchemaProps{ + Description: "Infrastructure contains the infrastructure template reference to be used for the creation of the MachinePool.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.LocalObjectTemplate"), + }, + }, + }, + Required: []string{"bootstrap", "infrastructure"}, + }, + }, + Dependencies: []string{ + "sigs.k8s.io/cluster-api/api/v1beta1.LocalObjectTemplate", "sigs.k8s.io/cluster-api/api/v1beta1.ObjectMeta"}, + } +} + +func schema_sigsk8sio_cluster_api_api_v1beta1_MachinePoolTopology(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "MachinePoolTopology specifies the different parameters for a pool of worker nodes in the topology. This pool of nodes is managed by a MachinePool object whose lifecycle is managed by the Cluster controller.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Metadata is the metadata applied to the MachinePool. At runtime this metadata is merged with the corresponding metadata from the ClusterClass.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.ObjectMeta"), + }, + }, + "class": { + SchemaProps: spec.SchemaProps{ + Description: "Class is the name of the MachinePoolClass used to create the pool of worker nodes. This should match one of the deployment classes defined in the ClusterClass object mentioned in the `Cluster.Spec.Class` field.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the unique identifier for this MachinePoolTopology. The value is used with other unique identifiers to create a MachinePool's Name (e.g. cluster's name, etc). In case the name is greater than the allowed maximum length, the values are hashed together.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "failureDomains": { + SchemaProps: spec.SchemaProps{ + Description: "FailureDomains is the list of failure domains the machine pool will be created in. Must match a key in the FailureDomains map stored on the cluster object.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "nodeDrainTimeout": { + SchemaProps: spec.SchemaProps{ + Description: "NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. 
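[Reviewer note: nodeDrainTimeout, nodeVolumeDetachTimeout, and nodeDeletionTimeout in the schemas above are all references to k8s.io/apimachinery/pkg/apis/meta/v1.Duration. A quick illustrative sketch of how such a field round-trips through JSON, which is how these timeouts appear in manifests:]

package main

import (
	"encoding/json"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// metav1.Duration marshals to its Go duration string form, e.g. "10m0s".
	d := metav1.Duration{Duration: 10 * time.Minute}
	b, err := json.Marshal(d)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // "10m0s"

	// And it parses back from the same representation.
	var out metav1.Duration
	if err := json.Unmarshal(b, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Duration == d.Duration) // true
}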
NOTE: NodeDrainTimeout is different from `kubectl drain --timeout`", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "nodeVolumeDetachTimeout": { + SchemaProps: spec.SchemaProps{ + Description: "NodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "nodeDeletionTimeout": { + SchemaProps: spec.SchemaProps{ + Description: "NodeDeletionTimeout defines how long the controller will attempt to delete the Node that the MachinePool hosts after the MachinePool is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "minReadySeconds": { + SchemaProps: spec.SchemaProps{ + Description: "Minimum number of seconds for which a newly created machine pool should be ready. Defaults to 0 (machine will be considered available as soon as it is ready)", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "replicas": { + SchemaProps: spec.SchemaProps{ + Description: "Replicas is the number of nodes belonging to this pool. If the value is nil, the MachinePool is created without the number of Replicas (defaulting to 1) and it's assumed that an external entity (like cluster autoscaler) is responsible for the management of this value.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "variables": { + SchemaProps: spec.SchemaProps{ + Description: "Variables can be used to customize the MachinePool through patches.", + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.MachinePoolVariables"), + }, + }, + }, + Required: []string{"class", "name"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "sigs.k8s.io/cluster-api/api/v1beta1.MachinePoolVariables", "sigs.k8s.io/cluster-api/api/v1beta1.ObjectMeta"}, + } +} + +func schema_sigsk8sio_cluster_api_api_v1beta1_MachinePoolVariables(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "MachinePoolVariables can be used to provide variables for a specific MachinePool.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "overrides": { + SchemaProps: spec.SchemaProps{ + Description: "Overrides can be used to override Cluster level variables.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.ClusterVariable"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "sigs.k8s.io/cluster-api/api/v1beta1.ClusterVariable"}, + } +} + func schema_sigsk8sio_cluster_api_api_v1beta1_MachineRollingUpdateDeployment(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -2557,14 +2932,14 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_MachineSetSpec(ref common.Referenc }, "replicas": { SchemaProps: spec.SchemaProps{ - Description: "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1.", + Description: "Replicas is the number of desired replicas. 
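[Reviewer note: the MachineSetSpec hunk that follows expands the replicas defaulting rules around the Kubernetes cluster autoscaler min/max size annotations. A sketch of that decision table as a plain function is below; the helper shape and names are assumptions for illustration, not the actual implementation.]

package main

import "fmt"

// defaultReplicas picks a default for a MachineSet whose replicas field is
// unset. oldReplicas is nil for a new MachineSet; minSize/maxSize are nil
// when the autoscaler annotations are absent.
func defaultReplicas(oldReplicas, minSize, maxSize *int32) int32 {
	// If the autoscaler min size and max size annotations are set:
	if minSize != nil && maxSize != nil {
		switch {
		case oldReplicas == nil:
			return *minSize // new MachineSet: use min size
		case *oldReplicas < *minSize:
			return *minSize // below range: clamp up to min size
		case *oldReplicas > *maxSize:
			return *maxSize // above range: clamp down to max size
		default:
			return *oldReplicas // in range: keep the value from the old MachineSet
		}
	}
	// Otherwise use 1.
	return 1
}

func main() {
	min, max := int32(3), int32(10)
	old := int32(12)
	fmt.Println(defaultReplicas(nil, &min, &max))  // 3
	fmt.Println(defaultReplicas(&old, &min, &max)) // 10
	fmt.Println(defaultReplicas(nil, nil, nil))    // 1
}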
This is a pointer to distinguish between explicit zero and unspecified.\n\nDefaults to: * if the Kubernetes autoscaler min size and max size annotations are set:\n - if it's a new MachineSet, use min size\n - if the replicas field of the old MachineSet is < min size, use min size\n - if the replicas field of the old MachineSet is > max size, use max size\n - if the replicas field of the old MachineSet is in the (min size, max size) range, keep the value from the oldMS\n* otherwise use 1 Note: Defaulting will be run whenever the replicas field is not set: * A new MachineSet is created with replicas not set. * On an existing MachineSet the replicas field was first set and is now unset. Those cases are especially relevant for the following Kubernetes autoscaler use cases: * A new MachineSet is created and replicas should be managed by the autoscaler * An existing MachineSet which initially wasn't controlled by the autoscaler\n should be later controlled by the autoscaler", Type: []string{"integer"}, Format: "int32", }, }, "minReadySeconds": { SchemaProps: spec.SchemaProps{ - Description: "MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready. Defaults to 0 (machine will be considered available as soon as it is ready)", + Description: "MinReadySeconds is the minimum number of seconds for which a Node for a newly created machine should be ready before considering the replica available. Defaults to 0 (machine will be considered available as soon as the Node is ready)", Type: []string{"integer"}, Format: "int32", }, @@ -3080,11 +3455,17 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_PatchSelectorMatch(ref common.Refe Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.PatchSelectorMatchMachineDeploymentClass"), }, }, + "machinePoolClass": { + SchemaProps: spec.SchemaProps{ + Description: "MachinePoolClass selects templates referenced in specific MachinePoolClasses in .spec.workers.machinePools.", + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.PatchSelectorMatchMachinePoolClass"), + }, + }, }, }, }, Dependencies: []string{ - "sigs.k8s.io/cluster-api/api/v1beta1.PatchSelectorMatchMachineDeploymentClass"}, + "sigs.k8s.io/cluster-api/api/v1beta1.PatchSelectorMatchMachineDeploymentClass", "sigs.k8s.io/cluster-api/api/v1beta1.PatchSelectorMatchMachinePoolClass"}, } } @@ -3116,6 +3497,34 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_PatchSelectorMatchMachineDeploymen } } +func schema_sigsk8sio_cluster_api_api_v1beta1_PatchSelectorMatchMachinePoolClass(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PatchSelectorMatchMachinePoolClass selects templates referenced in specific MachinePoolClasses in .spec.workers.machinePools.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "names": { + SchemaProps: spec.SchemaProps{ + Description: "Names selects templates by class names.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + func schema_sigsk8sio_cluster_api_api_v1beta1_Topology(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -3141,7 +3550,7 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_Topology(ref common.ReferenceCallb }, "rolloutAfter": { SchemaProps: spec.SchemaProps{ - Description: 
"RolloutAfter performs a rollout of the entire cluster one component at a time, control plane first and then machine deployments.", + Description: "RolloutAfter performs a rollout of the entire cluster one component at a time, control plane first and then machine deployments.\n\nDeprecated: This field has no function and is going to be removed in the next apiVersion.", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -3204,8 +3613,7 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_UnhealthyCondition(ref common.Refe }, "timeout": { SchemaProps: spec.SchemaProps{ - Default: 0, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, }, @@ -3261,11 +3669,25 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_WorkersClass(ref common.ReferenceC }, }, }, + "machinePools": { + SchemaProps: spec.SchemaProps{ + Description: "MachinePools is a list of machine pool classes that can be used to create a set of worker nodes.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.MachinePoolClass"), + }, + }, + }, + }, + }, }, }, }, Dependencies: []string{ - "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentClass"}, + "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentClass", "sigs.k8s.io/cluster-api/api/v1beta1.MachinePoolClass"}, } } @@ -3290,31 +3712,24 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_WorkersTopology(ref common.Referen }, }, }, - }, - }, - }, - Dependencies: []string{ - "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentTopology"}, - } -} - -func schema_sigsk8sio_cluster_api_api_v1beta1_machineDeploymentDefaulter(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "machineDeploymentDefaulter implements a defaulting webhook for MachineDeployment.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "decoder": { + "machinePools": { SchemaProps: spec.SchemaProps{ - Ref: ref("sigs.k8s.io/controller-runtime/pkg/webhook/admission.Decoder"), + Description: "MachinePools is a list of machine pools in the cluster.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.MachinePoolTopology"), + }, + }, + }, }, }, }, - Required: []string{"decoder"}, }, }, Dependencies: []string{ - "sigs.k8s.io/controller-runtime/pkg/webhook/admission.Decoder"}, + "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentTopology", "sigs.k8s.io/cluster-api/api/v1beta1.MachinePoolTopology"}, } } diff --git a/bootstrap/kubeadm/PROJECT b/bootstrap/kubeadm/PROJECT deleted file mode 100644 index fd16be6f8792..000000000000 --- a/bootstrap/kubeadm/PROJECT +++ /dev/null @@ -1,22 +0,0 @@ -version: "2" -domain: cluster.x-k8s.io -repo: sigs.k8s.io/cluster-api/bootstrap/kubeadm -resources: -- group: bootstrap - version: v1alpha3 - kind: KubeadmConfig -- group: bootstrap - version: v1alpha3 - kind: KubeadmConfigTemplate -- group: bootstrap - version: v1alpha4 - kind: KubeadmConfig -- group: bootstrap - version: v1alpha4 - kind: KubeadmConfigTemplate -- group: bootstrap - version: v1beta1 - kind: KubeadmConfig -- group: bootstrap - version: v1beta1 - kind: KubeadmConfigTemplate diff --git 
a/bootstrap/kubeadm/api/.import-restrictions b/bootstrap/kubeadm/api/.import-restrictions new file mode 100644 index 000000000000..f6f10b3ff544 --- /dev/null +++ b/bootstrap/kubeadm/api/.import-restrictions @@ -0,0 +1,5 @@ +rules: + - selectorRegexp: sigs[.]k8s[.]io/controller-runtime + allowedPrefixes: + - "sigs.k8s.io/controller-runtime/pkg/conversion" + forbiddenPrefixes: [] diff --git a/bootstrap/kubeadm/api/v1alpha3/webhook_test.go b/bootstrap/kubeadm/api/v1alpha3/webhook_test.go deleted file mode 100644 index 62eba4b589b9..000000000000 --- a/bootstrap/kubeadm/api/v1alpha3/webhook_test.go +++ /dev/null @@ -1,171 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha3 - -import ( - "fmt" - "testing" - - . "github.com/onsi/gomega" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" - "sigs.k8s.io/controller-runtime/pkg/client" - - "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/upstreamv1beta1" - "sigs.k8s.io/cluster-api/util" -) - -func TestKubeadmConfigConversion(t *testing.T) { - g := NewWithT(t) - ns, err := env.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5))) - g.Expect(err).ToNot(HaveOccurred()) - kubeadmConfigName := fmt.Sprintf("test-kubeadmconfig-%s", util.RandomString(5)) - kubeadmConfig := &KubeadmConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: kubeadmConfigName, - Namespace: ns.Name, - }, - Spec: fakeKubeadmConfigSpec, - } - - g.Expect(env.Create(ctx, kubeadmConfig)).To(Succeed()) - defer func(do ...client.Object) { - g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) - }(ns, kubeadmConfig) -} - -func TestKubeadmConfigTemplateConversion(t *testing.T) { - g := NewWithT(t) - ns, err := env.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5))) - g.Expect(err).ToNot(HaveOccurred()) - kubeadmConfigTemplateName := fmt.Sprintf("test-kubeadmconfigtemplate-%s", util.RandomString(5)) - kubeadmConfigTemplate := &KubeadmConfigTemplate{ - ObjectMeta: metav1.ObjectMeta{ - Name: kubeadmConfigTemplateName, - Namespace: ns.Name, - }, - Spec: KubeadmConfigTemplateSpec{ - Template: KubeadmConfigTemplateResource{ - Spec: fakeKubeadmConfigSpec, - }, - }, - } - - g.Expect(env.Create(ctx, kubeadmConfigTemplate)).To(Succeed()) - defer func(do ...client.Object) { - g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) - }(ns, kubeadmConfigTemplate) -} - -var fakeKubeadmConfigSpec = KubeadmConfigSpec{ - ClusterConfiguration: &upstreamv1beta1.ClusterConfiguration{ - KubernetesVersion: "v1.20.2", - APIServer: upstreamv1beta1.APIServer{ - ControlPlaneComponent: upstreamv1beta1.ControlPlaneComponent{ - ExtraArgs: map[string]string{ - "foo": "bar", - }, - ExtraVolumes: []upstreamv1beta1.HostPathMount{ - { - Name: "mount-path", - HostPath: "/foo", - MountPath: "/foo", - ReadOnly: false, - }, - }, - }, - }, - }, - InitConfiguration: &upstreamv1beta1.InitConfiguration{ - NodeRegistration: upstreamv1beta1.NodeRegistrationOptions{ - Name: "foo", - CRISocket: 
"/var/run/containerd/containerd.sock", - }, - }, - JoinConfiguration: &upstreamv1beta1.JoinConfiguration{ - NodeRegistration: upstreamv1beta1.NodeRegistrationOptions{ - Name: "foo", - CRISocket: "/var/run/containerd/containerd.sock", - }, - }, - Files: []File{ - { - Path: "/foo", - Owner: "root:root", - Permissions: "0644", - Content: "foo", - }, - { - Path: "/foobar", - Owner: "root:root", - Permissions: "0644", - ContentFrom: &FileSource{ - Secret: SecretFileSource{ - Name: "foo", - Key: "bar", - }, - }, - }, - }, - DiskSetup: &DiskSetup{ - Partitions: []Partition{ - { - Device: "/dev/disk/scsi1/lun0", - Layout: true, - Overwrite: pointer.Bool(false), - TableType: pointer.String("gpt"), - }, - }, - Filesystems: []Filesystem{ - { - Device: "/dev/disk/scsi2/lun0", - Filesystem: "ext4", - Label: "disk", - Partition: pointer.String("auto"), - Overwrite: pointer.Bool(true), - ReplaceFS: pointer.String("ntfs"), - ExtraOpts: []string{"-E"}, - }, - }, - }, - Mounts: []MountPoints{ - { - "LABEL=disk", - "/var/lib/disk", - }, - }, - PreKubeadmCommands: []string{`echo "foo"`}, - PostKubeadmCommands: []string{`echo "bar"`}, - Users: []User{ - { - Name: "foo", - Groups: pointer.String("foo"), - HomeDir: pointer.String("/home/foo"), - Inactive: pointer.Bool(false), - Shell: pointer.String("/bin/bash"), - Passwd: pointer.String("password"), - SSHAuthorizedKeys: []string{"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQD24GRNlhO+rgrseyWYrGwP0PACO/9JAsKV06W63yQ=="}, - }, - }, - NTP: &NTP{ - Servers: []string{"ntp.server.local"}, - Enabled: pointer.Bool(true), - }, - Format: Format("cloud-config"), - Verbosity: pointer.Int32(3), - UseExperimentalRetryJoin: true, -} diff --git a/bootstrap/kubeadm/api/v1beta1/.import-restrictions b/bootstrap/kubeadm/api/v1beta1/.import-restrictions new file mode 100644 index 000000000000..a2e1dfd08133 --- /dev/null +++ b/bootstrap/kubeadm/api/v1beta1/.import-restrictions @@ -0,0 +1,5 @@ +rules: + - selectorRegexp: sigs[.]k8s[.]io/controller-runtime + allowedPrefixes: [] + forbiddenPrefixes: + - "sigs.k8s.io/controller-runtime" diff --git a/bootstrap/kubeadm/api/v1beta1/groupversion_info.go b/bootstrap/kubeadm/api/v1beta1/groupversion_info.go index 40f78556dadb..cbd72aecbcbe 100644 --- a/bootstrap/kubeadm/api/v1beta1/groupversion_info.go +++ b/bootstrap/kubeadm/api/v1beta1/groupversion_info.go @@ -20,17 +20,26 @@ limitations under the License. package v1beta1 import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" ) var ( // GroupVersion is group version used to register these objects. GroupVersion = schema.GroupVersion{Group: "bootstrap.cluster.x-k8s.io", Version: "v1beta1"} - // SchemeBuilder is used to add go types to the GroupVersionKind scheme. - SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + // schemeBuilder is used to add go types to the GroupVersionKind scheme. + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) // AddToScheme adds the types in this group-version to the given scheme. - AddToScheme = SchemeBuilder.AddToScheme + AddToScheme = schemeBuilder.AddToScheme + + objectTypes = []runtime.Object{} ) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, objectTypes...) 
+ metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/bootstrap/kubeadm/api/v1beta1/kubeadm_types.go b/bootstrap/kubeadm/api/v1beta1/kubeadm_types.go index 653b4da552e8..67b4f51cd764 100644 --- a/bootstrap/kubeadm/api/v1beta1/kubeadm_types.go +++ b/bootstrap/kubeadm/api/v1beta1/kubeadm_types.go @@ -569,7 +569,7 @@ func (bts *BootstrapTokenString) UnmarshalJSON(b []byte) error { // String returns the string representation of the BootstrapTokenString. func (bts BootstrapTokenString) String() string { - if len(bts.ID) > 0 && len(bts.Secret) > 0 { + if bts.ID != "" && bts.Secret != "" { return bootstraputil.TokenFromIDAndSecret(bts.ID, bts.Secret) } return "" diff --git a/bootstrap/kubeadm/api/v1beta1/kubeadm_types_test.go b/bootstrap/kubeadm/api/v1beta1/kubeadm_types_test.go index 508263a039f4..e8d1e562d84c 100644 --- a/bootstrap/kubeadm/api/v1beta1/kubeadm_types_test.go +++ b/bootstrap/kubeadm/api/v1beta1/kubeadm_types_test.go @@ -18,9 +18,9 @@ package v1beta1 import ( "encoding/json" - "reflect" "testing" + "github.com/google/go-cmp/cmp" . "github.com/onsi/gomega" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" @@ -77,7 +77,7 @@ func TestNodeRegistrationOptionsMarshalJSON(t *testing.T) { g := NewWithT(t) b, err := tt.opts.MarshalJSON() - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(string(b)).To(Equal(tt.expected)) }) } @@ -97,7 +97,7 @@ func TestBootstrapTokenStringMarshalJSON(t *testing.T) { g := NewWithT(t) b, err := json.Marshal(rt.bts) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(string(b)).To(Equal(rt.expected)) }) } @@ -127,9 +127,9 @@ func TestBootstrapTokenStringUnmarshalJSON(t *testing.T) { if rt.expectedError { g.Expect(err).To(HaveOccurred()) } else { - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) } - g.Expect(newbts).To(Equal(rt.bts)) + g.Expect(newbts).To(BeComparableTo(rt.bts)) }) } } @@ -156,7 +156,7 @@ func roundtrip(input string, bts *BootstrapTokenString) error { var err error newbts := &BootstrapTokenString{} // If string input was specified, roundtrip like this: string -> (unmarshal) -> object -> (marshal) -> string - if len(input) > 0 { + if input != "" { if err := json.Unmarshal([]byte(input), newbts); err != nil { return errors.Wrap(err, "expected no unmarshal error, got error") } @@ -177,11 +177,12 @@ func roundtrip(input string, bts *BootstrapTokenString) error { if err := json.Unmarshal(b, newbts); err != nil { return errors.Wrap(err, "expected no unmarshal error, got error") } - if !reflect.DeepEqual(bts, newbts) { + if diff := cmp.Diff(bts, newbts); diff != "" { return errors.Errorf( - "expected object: %v\n\t actual: %v", + "expected object: %v\n\t actual: %v\n\t got diff: %v", bts, newbts, + diff, ) } } @@ -237,9 +238,9 @@ func TestNewBootstrapTokenString(t *testing.T) { if rt.expectedError { g.Expect(err).To(HaveOccurred()) } else { - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) } - g.Expect(actual).To(Equal(rt.bts)) + g.Expect(actual).To(BeComparableTo(rt.bts)) }) } } diff --git a/bootstrap/kubeadm/api/v1beta1/kubeadmconfig_types.go b/bootstrap/kubeadm/api/v1beta1/kubeadmconfig_types.go index fc6ebee0dc0c..70b9c133b45a 100644 --- a/bootstrap/kubeadm/api/v1beta1/kubeadmconfig_types.go +++ b/bootstrap/kubeadm/api/v1beta1/kubeadmconfig_types.go @@ -17,9 +17,13 @@ limitations under the License. 
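[Reviewer note: the groupversion_info.go hunk above swaps controller-runtime's scheme.Builder for a plain apimachinery SchemeBuilder, with types collected in objectTypes and appended from each types file's init(). A self-contained sketch of the same registration pattern follows; the package layout and the empty objectTypes slice are illustrative.]

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

var (
	groupVersion  = schema.GroupVersion{Group: "bootstrap.cluster.x-k8s.io", Version: "v1beta1"}
	schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
	addToScheme   = schemeBuilder.AddToScheme

	// In the real package each types file appends its objects here in init(),
	// e.g. objectTypes = append(objectTypes, &KubeadmConfig{}, &KubeadmConfigList{}).
	objectTypes = []runtime.Object{}
)

// addKnownTypes registers the collected objects plus the shared meta types
// (ListOptions, WatchEvent, ...) for the group version.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(groupVersion, objectTypes...)
	metav1.AddToGroupVersion(scheme, groupVersion)
	return nil
}

func main() {
	scheme := runtime.NewScheme()
	if err := addToScheme(scheme); err != nil {
		panic(err)
	}
	fmt.Println("registered", groupVersion)
}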
package v1beta1 import ( + "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/feature" ) // Format specifies the output format of the bootstrap data @@ -34,6 +38,16 @@ const ( Ignition Format = "ignition" ) +var ( + cannotUseWithIgnition = fmt.Sprintf("not supported when spec.format is set to: %q", Ignition) + conflictingFileSourceMsg = "only one of content or contentFrom may be specified for a single file" + conflictingUserSourceMsg = "only one of passwd or passwdFrom may be specified for a single user" + kubeadmBootstrapFormatIgnitionFeatureDisabledMsg = "can be set only if the KubeadmBootstrapFormatIgnition feature gate is enabled" + missingSecretNameMsg = "secret file source must specify non-empty secret name" + missingSecretKeyMsg = "secret file source must specify non-empty secret key" + pathConflictMsg = "path property must be unique among all files" +) + // KubeadmConfigSpec defines the desired state of KubeadmConfig. // Either ClusterConfiguration and InitConfiguration should be defined or the JoinConfiguration should be defined. type KubeadmConfigSpec struct { @@ -107,6 +121,242 @@ type KubeadmConfigSpec struct { Ignition *IgnitionSpec `json:"ignition,omitempty"` } +// Default defaults a KubeadmConfigSpec. +func (c *KubeadmConfigSpec) Default() { + if c.Format == "" { + c.Format = CloudConfig + } + if c.InitConfiguration != nil && c.InitConfiguration.NodeRegistration.ImagePullPolicy == "" { + c.InitConfiguration.NodeRegistration.ImagePullPolicy = "IfNotPresent" + } + if c.JoinConfiguration != nil && c.JoinConfiguration.NodeRegistration.ImagePullPolicy == "" { + c.JoinConfiguration.NodeRegistration.ImagePullPolicy = "IfNotPresent" + } +} + +// Validate ensures the KubeadmConfigSpec is valid. +func (c *KubeadmConfigSpec) Validate(pathPrefix *field.Path) field.ErrorList { + var allErrs field.ErrorList + + allErrs = append(allErrs, c.validateFiles(pathPrefix)...) + allErrs = append(allErrs, c.validateUsers(pathPrefix)...) + allErrs = append(allErrs, c.validateIgnition(pathPrefix)...) + + return allErrs +} + +func (c *KubeadmConfigSpec) validateFiles(pathPrefix *field.Path) field.ErrorList { + var allErrs field.ErrorList + + knownPaths := map[string]struct{}{} + + for i := range c.Files { + file := c.Files[i] + if file.Content != "" && file.ContentFrom != nil { + allErrs = append( + allErrs, + field.Invalid( + pathPrefix.Child("files").Index(i), + file, + conflictingFileSourceMsg, + ), + ) + } + // n.b.: if we ever add types besides Secret as a ContentFrom + // Source, we must add webhook validation here for one of the + // sources being non-nil. 
+ if file.ContentFrom != nil { + if file.ContentFrom.Secret.Name == "" { + allErrs = append( + allErrs, + field.Required( + pathPrefix.Child("files").Index(i).Child("contentFrom", "secret", "name"), + missingSecretNameMsg, + ), + ) + } + if file.ContentFrom.Secret.Key == "" { + allErrs = append( + allErrs, + field.Required( + pathPrefix.Child("files").Index(i).Child("contentFrom", "secret", "key"), + missingSecretKeyMsg, + ), + ) + } + } + _, conflict := knownPaths[file.Path] + if conflict { + allErrs = append( + allErrs, + field.Invalid( + pathPrefix.Child("files").Index(i).Child("path"), + file, + pathConflictMsg, + ), + ) + } + knownPaths[file.Path] = struct{}{} + } + + return allErrs +} + +func (c *KubeadmConfigSpec) validateUsers(pathPrefix *field.Path) field.ErrorList { + var allErrs field.ErrorList + + for i := range c.Users { + user := c.Users[i] + if user.Passwd != nil && user.PasswdFrom != nil { + allErrs = append( + allErrs, + field.Invalid( + pathPrefix.Child("users").Index(i), + user, + conflictingUserSourceMsg, + ), + ) + } + // n.b.: if we ever add types besides Secret as a PasswdFrom + // Source, we must add webhook validation here for one of the + // sources being non-nil. + if user.PasswdFrom != nil { + if user.PasswdFrom.Secret.Name == "" { + allErrs = append( + allErrs, + field.Required( + pathPrefix.Child("users").Index(i).Child("passwdFrom", "secret", "name"), + missingSecretNameMsg, + ), + ) + } + if user.PasswdFrom.Secret.Key == "" { + allErrs = append( + allErrs, + field.Required( + pathPrefix.Child("users").Index(i).Child("passwdFrom", "secret", "key"), + missingSecretKeyMsg, + ), + ) + } + } + } + + return allErrs +} + +func (c *KubeadmConfigSpec) validateIgnition(pathPrefix *field.Path) field.ErrorList { + var allErrs field.ErrorList + + if !feature.Gates.Enabled(feature.KubeadmBootstrapFormatIgnition) { + if c.Format == Ignition { + allErrs = append(allErrs, field.Forbidden( + pathPrefix.Child("format"), kubeadmBootstrapFormatIgnitionFeatureDisabledMsg)) + } + + if c.Ignition != nil { + allErrs = append(allErrs, field.Forbidden( + pathPrefix.Child("ignition"), kubeadmBootstrapFormatIgnitionFeatureDisabledMsg)) + } + + return allErrs + } + + if c.Format != Ignition { + if c.Ignition != nil { + allErrs = append( + allErrs, + field.Invalid( + pathPrefix.Child("format"), + c.Format, + fmt.Sprintf("must be set to %q if spec.ignition is set", Ignition), + ), + ) + } + + return allErrs + } + + for i, user := range c.Users { + if user.Inactive != nil && *user.Inactive { + allErrs = append( + allErrs, + field.Forbidden( + pathPrefix.Child("users").Index(i).Child("inactive"), + cannotUseWithIgnition, + ), + ) + } + } + + if c.UseExperimentalRetryJoin { + allErrs = append( + allErrs, + field.Forbidden( + pathPrefix.Child("useExperimentalRetryJoin"), + cannotUseWithIgnition, + ), + ) + } + + for i, file := range c.Files { + if file.Encoding == Gzip || file.Encoding == GzipBase64 { + allErrs = append( + allErrs, + field.Forbidden( + pathPrefix.Child("files").Index(i).Child("encoding"), + cannotUseWithIgnition, + ), + ) + } + } + + if c.DiskSetup == nil { + return allErrs + } + + for i, partition := range c.DiskSetup.Partitions { + if partition.TableType != nil && *partition.TableType != "gpt" { + allErrs = append( + allErrs, + field.Invalid( + pathPrefix.Child("diskSetup", "partitions").Index(i).Child("tableType"), + *partition.TableType, + fmt.Sprintf( + "only partition type %q is supported when spec.format is set to %q", + "gpt", + Ignition, + ), + ), + ) + } + } + + for 
i, fs := range c.DiskSetup.Filesystems { + if fs.ReplaceFS != nil { + allErrs = append( + allErrs, + field.Forbidden( + pathPrefix.Child("diskSetup", "filesystems").Index(i).Child("replaceFS"), + cannotUseWithIgnition, + ), + ) + } + + if fs.Partition != nil { + allErrs = append( + allErrs, + field.Forbidden( + pathPrefix.Child("diskSetup", "filesystems").Index(i).Child("partition"), + cannotUseWithIgnition, + ), + ) + } + } + + return allErrs +} + // IgnitionSpec contains Ignition specific configuration. type IgnitionSpec struct { // ContainerLinuxConfig contains CLC specific configuration. @@ -193,7 +443,7 @@ type KubeadmConfigList struct { } func init() { - SchemeBuilder.Register(&KubeadmConfig{}, &KubeadmConfigList{}) + objectTypes = append(objectTypes, &KubeadmConfig{}, &KubeadmConfigList{}) } // Encoding specifies the cloud-init file encoding. diff --git a/bootstrap/kubeadm/api/v1beta1/kubeadmconfig_webhook.go b/bootstrap/kubeadm/api/v1beta1/kubeadmconfig_webhook.go deleted file mode 100644 index 32879de190f0..000000000000 --- a/bootstrap/kubeadm/api/v1beta1/kubeadmconfig_webhook.go +++ /dev/null @@ -1,319 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "fmt" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/validation/field" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/webhook" - - "sigs.k8s.io/cluster-api/feature" -) - -var ( - cannotUseWithIgnition = fmt.Sprintf("not supported when spec.format is set to %q", Ignition) - conflictingFileSourceMsg = "only one of content or contentFrom may be specified for a single file" - conflictingUserSourceMsg = "only one of passwd or passwdFrom may be specified for a single user" - kubeadmBootstrapFormatIgnitionFeatureDisabledMsg = "can be set only if the KubeadmBootstrapFormatIgnition feature gate is enabled" - missingSecretNameMsg = "secret file source must specify non-empty secret name" - missingSecretKeyMsg = "secret file source must specify non-empty secret key" - pathConflictMsg = "path property must be unique among all files" -) - -func (c *KubeadmConfig) SetupWebhookWithManager(mgr ctrl.Manager) error { - return ctrl.NewWebhookManagedBy(mgr). - For(c). - Complete() -} - -// +kubebuilder:webhook:verbs=create;update,path=/mutate-bootstrap-cluster-x-k8s-io-v1beta1-kubeadmconfig,mutating=true,failurePolicy=fail,groups=bootstrap.cluster.x-k8s.io,resources=kubeadmconfigs,versions=v1beta1,name=default.kubeadmconfig.bootstrap.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 - -var _ webhook.Defaulter = &KubeadmConfig{} - -// Default implements webhook.Defaulter so a webhook will be registered for the type. -func (c *KubeadmConfig) Default() { - DefaultKubeadmConfigSpec(&c.Spec) -} - -// DefaultKubeadmConfigSpec defaults a KubeadmConfigSpec. 
-func DefaultKubeadmConfigSpec(r *KubeadmConfigSpec) { - if r.Format == "" { - r.Format = CloudConfig - } - if r.InitConfiguration != nil && r.InitConfiguration.NodeRegistration.ImagePullPolicy == "" { - r.InitConfiguration.NodeRegistration.ImagePullPolicy = "IfNotPresent" - } - if r.JoinConfiguration != nil && r.JoinConfiguration.NodeRegistration.ImagePullPolicy == "" { - r.JoinConfiguration.NodeRegistration.ImagePullPolicy = "IfNotPresent" - } -} - -// +kubebuilder:webhook:verbs=create;update,path=/validate-bootstrap-cluster-x-k8s-io-v1beta1-kubeadmconfig,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=bootstrap.cluster.x-k8s.io,resources=kubeadmconfigs,versions=v1beta1,name=validation.kubeadmconfig.bootstrap.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 - -var _ webhook.Validator = &KubeadmConfig{} - -// ValidateCreate implements webhook.Validator so a webhook will be registered for the type. -func (c *KubeadmConfig) ValidateCreate() error { - return c.Spec.validate(c.Name) -} - -// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. -func (c *KubeadmConfig) ValidateUpdate(_ runtime.Object) error { - return c.Spec.validate(c.Name) -} - -// ValidateDelete implements webhook.Validator so a webhook will be registered for the type. -func (c *KubeadmConfig) ValidateDelete() error { - return nil -} - -func (c *KubeadmConfigSpec) validate(name string) error { - allErrs := c.Validate(field.NewPath("spec")) - - if len(allErrs) == 0 { - return nil - } - - return apierrors.NewInvalid(GroupVersion.WithKind("KubeadmConfig").GroupKind(), name, allErrs) -} - -// Validate ensures the KubeadmConfigSpec is valid. -func (c *KubeadmConfigSpec) Validate(pathPrefix *field.Path) field.ErrorList { - var allErrs field.ErrorList - - allErrs = append(allErrs, c.validateFiles(pathPrefix)...) - allErrs = append(allErrs, c.validateUsers(pathPrefix)...) - allErrs = append(allErrs, c.validateIgnition(pathPrefix)...) - - return allErrs -} - -func (c *KubeadmConfigSpec) validateFiles(pathPrefix *field.Path) field.ErrorList { - var allErrs field.ErrorList - - knownPaths := map[string]struct{}{} - - for i := range c.Files { - file := c.Files[i] - if file.Content != "" && file.ContentFrom != nil { - allErrs = append( - allErrs, - field.Invalid( - pathPrefix.Child("files").Index(i), - file, - conflictingFileSourceMsg, - ), - ) - } - // n.b.: if we ever add types besides Secret as a ContentFrom - // Source, we must add webhook validation here for one of the - // sources being non-nil. 
- if file.ContentFrom != nil { - if file.ContentFrom.Secret.Name == "" { - allErrs = append( - allErrs, - field.Required( - pathPrefix.Child("files").Index(i).Child("contentFrom", "secret", "name"), - missingSecretNameMsg, - ), - ) - } - if file.ContentFrom.Secret.Key == "" { - allErrs = append( - allErrs, - field.Required( - pathPrefix.Child("files").Index(i).Child("contentFrom", "secret", "key"), - missingSecretKeyMsg, - ), - ) - } - } - _, conflict := knownPaths[file.Path] - if conflict { - allErrs = append( - allErrs, - field.Invalid( - pathPrefix.Child("files").Index(i).Child("path"), - file, - pathConflictMsg, - ), - ) - } - knownPaths[file.Path] = struct{}{} - } - - return allErrs -} - -func (c *KubeadmConfigSpec) validateUsers(pathPrefix *field.Path) field.ErrorList { - var allErrs field.ErrorList - - for i := range c.Users { - user := c.Users[i] - if user.Passwd != nil && user.PasswdFrom != nil { - allErrs = append( - allErrs, - field.Invalid( - pathPrefix.Child("users").Index(i), - user, - conflictingUserSourceMsg, - ), - ) - } - // n.b.: if we ever add types besides Secret as a PasswdFrom - // Source, we must add webhook validation here for one of the - // sources being non-nil. - if user.PasswdFrom != nil { - if user.PasswdFrom.Secret.Name == "" { - allErrs = append( - allErrs, - field.Required( - pathPrefix.Child("users").Index(i).Child("passwdFrom", "secret", "name"), - missingSecretNameMsg, - ), - ) - } - if user.PasswdFrom.Secret.Key == "" { - allErrs = append( - allErrs, - field.Required( - pathPrefix.Child("users").Index(i).Child("passwdFrom", "secret", "key"), - missingSecretKeyMsg, - ), - ) - } - } - } - - return allErrs -} - -func (c *KubeadmConfigSpec) validateIgnition(pathPrefix *field.Path) field.ErrorList { - var allErrs field.ErrorList - - if !feature.Gates.Enabled(feature.KubeadmBootstrapFormatIgnition) { - if c.Format == Ignition { - allErrs = append(allErrs, field.Forbidden( - pathPrefix.Child("format"), kubeadmBootstrapFormatIgnitionFeatureDisabledMsg)) - } - - if c.Ignition != nil { - allErrs = append(allErrs, field.Forbidden( - pathPrefix.Child("ignition"), kubeadmBootstrapFormatIgnitionFeatureDisabledMsg)) - } - - return allErrs - } - - if c.Format != Ignition { - if c.Ignition != nil { - allErrs = append( - allErrs, - field.Invalid( - pathPrefix.Child("format"), - c.Format, - fmt.Sprintf("must be set to %q if spec.ignition is set", Ignition), - ), - ) - } - - return allErrs - } - - for i, user := range c.Users { - if user.Inactive != nil && *user.Inactive { - allErrs = append( - allErrs, - field.Forbidden( - pathPrefix.Child("users").Index(i).Child("inactive"), - cannotUseWithIgnition, - ), - ) - } - } - - if c.UseExperimentalRetryJoin { - allErrs = append( - allErrs, - field.Forbidden( - pathPrefix.Child("useExperimentalRetryJoin"), - cannotUseWithIgnition, - ), - ) - } - - for i, file := range c.Files { - if file.Encoding == Gzip || file.Encoding == GzipBase64 { - allErrs = append( - allErrs, - field.Forbidden( - pathPrefix.Child("files").Index(i).Child("encoding"), - cannotUseWithIgnition, - ), - ) - } - } - - if c.DiskSetup == nil { - return allErrs - } - - for i, partition := range c.DiskSetup.Partitions { - if partition.TableType != nil && *partition.TableType != "gpt" { - allErrs = append( - allErrs, - field.Invalid( - pathPrefix.Child("diskSetup", "partitions").Index(i).Child("tableType"), - *partition.TableType, - fmt.Sprintf( - "only partition type %q is supported when spec.format is set to %q", - "gpt", - Ignition, - ), - ), - ) - } - } - - for 
i, fs := range c.DiskSetup.Filesystems { - if fs.ReplaceFS != nil { - allErrs = append( - allErrs, - field.Forbidden( - pathPrefix.Child("diskSetup", "filesystems").Index(i).Child("replaceFS"), - cannotUseWithIgnition, - ), - ) - } - - if fs.Partition != nil { - allErrs = append( - allErrs, - field.Forbidden( - pathPrefix.Child("diskSetup", "filesystems").Index(i).Child("partition"), - cannotUseWithIgnition, - ), - ) - } - } - - return allErrs -} diff --git a/bootstrap/kubeadm/api/v1beta1/kubeadmconfigtemplate_types.go b/bootstrap/kubeadm/api/v1beta1/kubeadmconfigtemplate_types.go index 47e5691e16a9..4326cb4599c1 100644 --- a/bootstrap/kubeadm/api/v1beta1/kubeadmconfigtemplate_types.go +++ b/bootstrap/kubeadm/api/v1beta1/kubeadmconfigtemplate_types.go @@ -60,5 +60,5 @@ type KubeadmConfigTemplateList struct { } func init() { - SchemeBuilder.Register(&KubeadmConfigTemplate{}, &KubeadmConfigTemplateList{}) + objectTypes = append(objectTypes, &KubeadmConfigTemplate{}, &KubeadmConfigTemplateList{}) } diff --git a/bootstrap/kubeadm/api/v1beta1/zz_generated.deepcopy.go b/bootstrap/kubeadm/api/v1beta1/zz_generated.deepcopy.go index 588c218d2d4a..425b90edab0b 100644 --- a/bootstrap/kubeadm/api/v1beta1/zz_generated.deepcopy.go +++ b/bootstrap/kubeadm/api/v1beta1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* Copyright The Kubernetes Authors. diff --git a/bootstrap/kubeadm/config/certmanager/certificate.yaml b/bootstrap/kubeadm/config/certmanager/certificate.yaml index d53b0e8e97e7..58c9a367d0ec 100644 --- a/bootstrap/kubeadm/config/certmanager/certificate.yaml +++ b/bootstrap/kubeadm/config/certmanager/certificate.yaml @@ -15,14 +15,14 @@ metadata: name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml namespace: system spec: - # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize + # SERVICE_NAME and SERVICE_NAMESPACE will be substituted by kustomize dnsNames: - - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc - - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local + - SERVICE_NAME.SERVICE_NAMESPACE.svc + - SERVICE_NAME.SERVICE_NAMESPACE.svc.cluster.local issuerRef: kind: Issuer name: selfsigned-issuer - secretName: $(SERVICE_NAME)-cert # this secret will not be prefixed, since it's not managed by kustomize + secretName: capi-kubeadm-bootstrap-webhook-service-cert # this secret will not be prefixed, since it's not managed by kustomize subject: organizations: - - k8s-sig-cluster-lifecycle \ No newline at end of file + - k8s-sig-cluster-lifecycle diff --git a/bootstrap/kubeadm/config/certmanager/kustomizeconfig.yaml b/bootstrap/kubeadm/config/certmanager/kustomizeconfig.yaml index 28a895a404a9..87d212b8e0b3 100644 --- a/bootstrap/kubeadm/config/certmanager/kustomizeconfig.yaml +++ b/bootstrap/kubeadm/config/certmanager/kustomizeconfig.yaml @@ -6,14 +6,3 @@ nameReference: - kind: Certificate group: cert-manager.io path: spec/issuerRef/name - -varReference: -- kind: Certificate - group: cert-manager.io - path: spec/commonName -- kind: Certificate - group: cert-manager.io - path: spec/dnsNames -- kind: Certificate - group: cert-manager.io - path: spec/secretName diff --git a/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigs.yaml b/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigs.yaml index f88b1bd00a26..3375f583c6cc 100644 --- a/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigs.yaml +++ 
b/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigs.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.15.0 name: kubeadmconfigs.bootstrap.cluster.x-k8s.io spec: group: bootstrap.cluster.x-k8s.io @@ -17,28 +16,37 @@ spec: singular: kubeadmconfig scope: Namespaced versions: - - name: v1alpha3 + - deprecated: true + name: v1alpha3 schema: openAPIV3Schema: - description: "KubeadmConfig is the Schema for the kubeadmconfigs API. \n Deprecated: - This type will be removed in one of the next releases." + description: |- + KubeadmConfig is the Schema for the kubeadmconfigs API. + + + Deprecated: This type will be removed in one of the next releases. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: KubeadmConfigSpec defines the desired state of KubeadmConfig. - Either ClusterConfiguration and InitConfiguration should be defined - or the JoinConfiguration should be defined. + description: |- + KubeadmConfigSpec defines the desired state of KubeadmConfig. + Either ClusterConfiguration and InitConfiguration should be defined or the JoinConfiguration should be defined. properties: clusterConfiguration: description: ClusterConfiguration along with InitConfiguration are @@ -57,21 +65,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags to pass to - the control plane component. TODO: This is temporary and - ideally we would like to switch all components to use ComponentConfig - + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements describing - volumes that are mounted from the host. + description: |- + HostPathMount contains elements describing volumes that are mounted from the + host. 
properties: hostPath: - description: HostPath is the path in the host that will - be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside the pod where @@ -98,33 +108,34 @@ spec: type: string type: object apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string certificatesDir: - description: 'CertificatesDir specifies where to store or look - for all required certificates. NB: if not provided, this will - default to `/etc/kubernetes/pki`' + description: |- + CertificatesDir specifies where to store or look for all required certificates. + NB: if not provided, this will default to `/etc/kubernetes/pki` type: string clusterName: description: The cluster name type: string controlPlaneEndpoint: - description: 'ControlPlaneEndpoint sets a stable IP address or - DNS name for the control plane; it can be a valid IP address - or a RFC-1123 DNS subdomain, both with optional TCP port. In - case the ControlPlaneEndpoint is not specified, the AdvertiseAddress - + BindPort are used; in case the ControlPlaneEndpoint is specified - but without a TCP port, the BindPort is used. Possible usages - are: e.g. In a cluster with more than one control plane instances, - this field should be assigned the address of the external load - balancer in front of the control plane instances. e.g. in environments - with enforced node recycling, the ControlPlaneEndpoint could - be used for assigning a stable DNS to the control plane. NB: - This value defaults to the first value in the Cluster object - status.apiEndpoints array.' + description: |- + ControlPlaneEndpoint sets a stable IP address or DNS name for the control plane; it + can be a valid IP address or a RFC-1123 DNS subdomain, both with optional TCP port. + In case the ControlPlaneEndpoint is not specified, the AdvertiseAddress + BindPort + are used; in case the ControlPlaneEndpoint is specified but without a TCP port, + the BindPort is used. + Possible usages are: + e.g. In a cluster with more than one control plane instances, this field should be + assigned the address of the external load balancer in front of the + control plane instances. + e.g. in environments with enforced node recycling, the ControlPlaneEndpoint + could be used for assigning a stable DNS to the control plane. + NB: This value defaults to the first value in the Cluster object status.apiEndpoints array. type: string controllerManager: description: ControllerManager contains extra settings for the @@ -133,21 +144,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags to pass to - the control plane component. TODO: This is temporary and - ideally we would like to switch all components to use ComponentConfig - + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. 
+ TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements describing - volumes that are mounted from the host. + description: |- + HostPathMount contains elements describing volumes that are mounted from the + host. properties: hostPath: - description: HostPath is the path in the host that will - be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside the pod where @@ -174,36 +187,38 @@ spec: in the cluster. properties: imageRepository: - description: ImageRepository sets the container registry to - pull images from. if not set, the ImageRepository defined - in ClusterConfiguration will be used instead. + description: |- + ImageRepository sets the container registry to pull images from. + if not set, the ImageRepository defined in ClusterConfiguration will be used instead. type: string imageTag: - description: ImageTag allows to specify a tag for the image. - In case this value is set, kubeadm does not change automatically - the version of the above components during upgrades. + description: |- + ImageTag allows to specify a tag for the image. + In case this value is set, kubeadm does not change automatically the version of the above components during upgrades. type: string type: description: Type defines the DNS add-on to be used type: string type: object etcd: - description: 'Etcd holds configuration for etcd. NB: This value - defaults to a Local (stacked) etcd' + description: |- + Etcd holds configuration for etcd. + NB: This value defaults to a Local (stacked) etcd properties: external: - description: External describes how to connect to an external - etcd cluster Local and External are mutually exclusive + description: |- + External describes how to connect to an external etcd cluster + Local and External are mutually exclusive properties: caFile: - description: CAFile is an SSL Certificate Authority file - used to secure etcd communication. Required if using - a TLS connection. + description: |- + CAFile is an SSL Certificate Authority file used to secure etcd communication. + Required if using a TLS connection. type: string certFile: - description: CertFile is an SSL certification file used - to secure etcd communication. Required if using a TLS - connection. + description: |- + CertFile is an SSL certification file used to secure etcd communication. + Required if using a TLS connection. type: string endpoints: description: Endpoints of etcd members. Required for ExternalEtcd. @@ -211,8 +226,9 @@ spec: type: string type: array keyFile: - description: KeyFile is an SSL key file used to secure - etcd communication. Required if using a TLS connection. + description: |- + KeyFile is an SSL key file used to secure etcd communication. + Required if using a TLS connection. type: string required: - caFile @@ -221,30 +237,31 @@ spec: - keyFile type: object local: - description: Local provides configuration knobs for configuring - the local etcd instance Local and External are mutually - exclusive + description: |- + Local provides configuration knobs for configuring the local etcd instance + Local and External are mutually exclusive properties: dataDir: - description: DataDir is the directory etcd will place - its data. 
Defaults to "/var/lib/etcd". + description: |- + DataDir is the directory etcd will place its data. + Defaults to "/var/lib/etcd". type: string extraArgs: additionalProperties: type: string - description: ExtraArgs are extra arguments provided to - the etcd binary when run inside a static pod. + description: |- + ExtraArgs are extra arguments provided to the etcd binary + when run inside a static pod. type: object imageRepository: - description: ImageRepository sets the container registry - to pull images from. if not set, the ImageRepository - defined in ClusterConfiguration will be used instead. + description: |- + ImageRepository sets the container registry to pull images from. + if not set, the ImageRepository defined in ClusterConfiguration will be used instead. type: string imageTag: - description: ImageTag allows to specify a tag for the - image. In case this value is set, kubeadm does not change - automatically the version of the above components during - upgrades. + description: |- + ImageTag allows to specify a tag for the image. + In case this value is set, kubeadm does not change automatically the version of the above components during upgrades. type: string peerCertSANs: description: PeerCertSANs sets extra Subject Alternative @@ -266,44 +283,45 @@ spec: description: FeatureGates enabled by the user. type: object imageRepository: - description: ImageRepository sets the container registry to pull - images from. If empty, `k8s.gcr.io` will be used by default; - in case of kubernetes version is a CI build (kubernetes version - starts with `ci/` or `ci-cross/`) `gcr.io/k8s-staging-ci-images` - will be used as a default for control plane components and for - kube-proxy, while `k8s.gcr.io` will be used for all the other - images. + description: |- + ImageRepository sets the container registry to pull images from. + If empty, `k8s.gcr.io` will be used by default; in case of kubernetes version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`) + `gcr.io/k8s-staging-ci-images` will be used as a default for control plane components and for kube-proxy, while `k8s.gcr.io` + will be used for all the other images. type: string kind: - description: 'Kind is a string value representing the REST resource - this object represents. Servers may infer this from the endpoint - the client submits requests to. Cannot be updated. In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string kubernetesVersion: - description: 'KubernetesVersion is the target version of the control - plane. NB: This value defaults to the Machine object spec.version' + description: |- + KubernetesVersion is the target version of the control plane. + NB: This value defaults to the Machine object spec.version type: string networking: - description: 'Networking holds configuration for the networking - topology of the cluster. NB: This value defaults to the Cluster - object spec.clusterNetwork.' + description: |- + Networking holds configuration for the networking topology of the cluster. + NB: This value defaults to the Cluster object spec.clusterNetwork. 
properties: dnsDomain: description: DNSDomain is the dns domain used by k8s services. Defaults to "cluster.local". type: string podSubnet: - description: PodSubnet is the subnet used by pods. If unset, - the API server will not allocate CIDR ranges for every node. - Defaults to a comma-delimited string of the Cluster object's - spec.clusterNetwork.services.cidrBlocks if that is set + description: |- + PodSubnet is the subnet used by pods. + If unset, the API server will not allocate CIDR ranges for every node. + Defaults to a comma-delimited string of the Cluster object's spec.clusterNetwork.pods.cidrBlocks if that is set type: string serviceSubnet: - description: ServiceSubnet is the subnet used by k8s services. - Defaults to a comma-delimited string of the Cluster object's - spec.clusterNetwork.pods.cidrBlocks, or to "10.96.0.0/12" - if that's unset. + description: |- + ServiceSubnet is the subnet used by k8s services. + Defaults to a comma-delimited string of the Cluster object's spec.clusterNetwork.services.cidrBlocks, or + to "10.96.0.0/12" if that's unset. type: string type: object scheduler: @@ -313,21 +331,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags to pass to - the control plane component. TODO: This is temporary and - ideally we would like to switch all components to use ComponentConfig - + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements describing - volumes that are mounted from the host. + description: |- + HostPathMount contains elements describing volumes that are mounted from the + host. properties: hostPath: - description: HostPath is the path in the host that will - be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside the pod where @@ -382,9 +402,9 @@ spec: used. If set to None, no label is used. type: string overwrite: - description: Overwrite defines whether or not to overwrite - any existing filesystem. If true, any pre-existing file - system will be destroyed. Use with Caution. + description: |- + Overwrite defines whether or not to overwrite any existing filesystem. + If true, any pre-existing file system will be destroyed. Use with Caution. type: boolean partition: description: 'Partition specifies the partition to use. @@ -392,10 +412,9 @@ spec: and , where NUM is the actual partition number.' type: string replaceFS: - description: 'ReplaceFS is a special directive, used for - Microsoft Azure that instructs cloud-init to replace a - file system of . NOTE: unless you define a label, - this requires the use of the ''any'' partition directive.' + description: |- + ReplaceFS is a special directive, used for Microsoft Azure that instructs cloud-init to replace a file system of . + NOTE: unless you define a label, this requires the use of the 'any' partition directive. type: string required: - device @@ -413,21 +432,21 @@ spec: description: Device is the name of the device. type: string layout: - description: Layout specifies the device layout. If it is - true, a single partition will be created for the entire - device.
When layout is false, it means don't partition - or ignore existing partitioning. + description: |- + Layout specifies the device layout. + If it is true, a single partition will be created for the entire device. + When layout is false, it means don't partition or ignore existing partitioning. type: boolean overwrite: - description: Overwrite describes whether to skip checks - and create the partition if a partition or filesystem - is found on the device. Use with caution. Default is 'false'. + description: |- + Overwrite describes whether to skip checks and create the partition if a partition or filesystem is found on the device. + Use with caution. Default is 'false'. type: boolean tableType: - description: 'TableType specifies the tupe of partition - table. The following are supported: ''mbr'': default and - setups a MS-DOS partition table ''gpt'': setups a GPT - partition table' + description: |- + TableType specifies the type of partition table. The following are supported: + 'mbr': default and sets up an MS-DOS partition table + 'gpt': sets up a GPT partition table type: string required: - device @@ -501,50 +520,52 @@ spec: the configurations necessary for the init command properties: apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string bootstrapTokens: - description: BootstrapTokens is respected at `kubeadm init` time - and describes a set of Bootstrap Tokens to create. This information - IS NOT uploaded to the kubeadm cluster configmap, partly because - of its sensitive nature + description: |- + BootstrapTokens is respected at `kubeadm init` time and describes a set of Bootstrap Tokens to create. + This information IS NOT uploaded to the kubeadm cluster configmap, partly because of its sensitive nature items: description: BootstrapToken describes one bootstrap token, stored as a Secret in the cluster. properties: description: - description: Description sets a human-friendly message why - this token exists and what it's used for, so other administrators - can know its purpose. + description: |- + Description sets a human-friendly message why this token exists and what it's used + for, so other administrators can know its purpose. type: string expires: - description: Expires specifies the timestamp when this token - expires. Defaults to being set dynamically at runtime - based on the TTL. Expires and TTL are mutually exclusive. + description: |- + Expires specifies the timestamp when this token expires. Defaults to being set + dynamically at runtime based on the TTL. Expires and TTL are mutually exclusive.
format: date-time type: string groups: - description: Groups specifies the extra groups that this - token will authenticate as when/if used for authentication + description: |- + Groups specifies the extra groups that this token will authenticate as when/if + used for authentication items: type: string type: array token: - description: Token is used for establishing bidirectional - trust between nodes and control-planes. Used for joining - nodes in the cluster. + description: |- + Token is used for establishing bidirectional trust between nodes and control-planes. + Used for joining nodes in the cluster. type: string ttl: - description: TTL defines the time to live for this token. - Defaults to 24h. Expires and TTL are mutually exclusive. + description: |- + TTL defines the time to live for this token. Defaults to 24h. + Expires and TTL are mutually exclusive. type: string usages: - description: Usages describes the ways in which this token - can be used. Can by default be used for establishing bidirectional - trust, but that can be changed here. + description: |- + Usages describes the ways in which this token can be used. Can by default be used + for establishing bidirectional trust, but that can be changed here. items: type: string type: array @@ -553,21 +574,20 @@ spec: type: object type: array kind: - description: 'Kind is a string value representing the REST resource - this object represents. Servers may infer this from the endpoint - the client submits requests to. Cannot be updated. In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string localAPIEndpoint: - description: LocalAPIEndpoint represents the endpoint of the API - server instance that's deployed on this control plane node In - HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint - in the sense that ControlPlaneEndpoint is the global endpoint - for the cluster, which then loadbalances the requests to each - individual API server. This configuration object lets you customize - what IP/DNS name and port the local API server advertises it's - accessible on. By default, kubeadm tries to auto-detect the - IP of the default interface and use that, but in case that process + description: |- + LocalAPIEndpoint represents the endpoint of the API server instance that's deployed on this control plane node + In HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint in the sense that ControlPlaneEndpoint + is the global endpoint for the cluster, which then loadbalances the requests to each individual API server. This + configuration object lets you customize what IP/DNS name and port the local API server advertises it's accessible + on. By default, kubeadm tries to auto-detect the IP of the default interface and use that, but in case that process fails you may set the desired value here. properties: advertiseAddress: @@ -575,8 +595,9 @@ spec: API server to advertise. type: string bindPort: - description: BindPort sets the secure port for the API Server - to bind to. Defaults to 6443. + description: |- + BindPort sets the secure port for the API Server to bind to. + Defaults to 6443. 
format: int32 type: integer required: @@ -584,10 +605,10 @@ spec: - bindPort type: object nodeRegistration: - description: NodeRegistration holds fields that relate to registering - the new control-plane node to the cluster. When used in the - context of control plane nodes, NodeRegistration should remain - consistent across both InitConfiguration and JoinConfiguration + description: |- + NodeRegistration holds fields that relate to registering the new control-plane node to the cluster. + When used in the context of control plane nodes, NodeRegistration should remain consistent + across both InitConfiguration and JoinConfiguration properties: criSocket: description: CRISocket is used to retrieve container runtime @@ -597,47 +618,41 @@ spec: kubeletExtraArgs: additionalProperties: type: string - description: KubeletExtraArgs passes through extra arguments - to the kubelet. The arguments here are passed to the kubelet - command line via the environment file kubeadm writes at - runtime for the kubelet to source. This overrides the generic - base-level configuration in the kubelet-config-1.X ConfigMap - Flags have higher priority when parsing. These values are - local and specific to the node kubeadm is executing on. + description: |- + KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file + kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap + Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. type: object name: - description: Name is the `.Metadata.Name` field of the Node - API object that will be created in this `kubeadm init` or - `kubeadm join` operation. This field is also used in the - CommonName field of the kubelet's client certificate to - the API server. Defaults to the hostname of the node if - not provided. + description: |- + Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. + This field is also used in the CommonName field of the kubelet's client certificate to the API server. + Defaults to the hostname of the node if not provided. type: string taints: - description: 'Taints specifies the taints the Node API object - should be registered with. If this field is unset, i.e. - nil, in the `kubeadm init` process it will be defaulted - to []v1.Taint{''node-role.kubernetes.io/master=""''}. If - you don''t want to taint your control-plane node, set this - field to an empty slice, i.e. `taints: {}` in the YAML file. - This field is solely used for Node registration.' + description: |- + Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process + it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your control-plane node, set this field to an + empty slice, i.e. `taints: {}` in the YAML file. This field is solely used for Node registration. items: - description: The node this Taint is attached to has the - "effect" on any pod that does not tolerate the Taint. + description: |- + The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. properties: effect: - description: Required. The effect of the taint on pods - that do not tolerate the taint. 
Valid effects are - NoSchedule, PreferNoSchedule and NoExecute. + description: |- + Required. The effect of the taint on pods + that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. type: string key: description: Required. The taint key to be applied to a node. type: string timeAdded: - description: TimeAdded represents the time at which - the taint was added. It is only written for NoExecute - taints. + description: |- + TimeAdded represents the time at which the taint was added. + It is only written for NoExecute taints. format: date-time type: string value: @@ -656,21 +671,23 @@ spec: join command properties: apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string caCertPath: - description: 'CACertPath is the path to the SSL certificate authority - used to secure comunications between node and control-plane. - Defaults to "/etc/kubernetes/pki/ca.crt". TODO: revisit when - there is defaulting from k/k' + description: |- + CACertPath is the path to the SSL certificate authority used to + secure communications between node and control-plane. + Defaults to "/etc/kubernetes/pki/ca.crt". + TODO: revisit when there is defaulting from k/k type: string controlPlane: - description: ControlPlane defines the additional control plane - instance to be deployed on the joining node. If nil, no additional - control plane instance will be deployed. + description: |- + ControlPlane defines the additional control plane instance to be deployed on the joining node. + If nil, no additional control plane instance will be deployed. properties: localAPIEndpoint: description: LocalAPIEndpoint represents the endpoint of the @@ -681,8 +698,9 @@ spec: the API server to advertise. type: string bindPort: - description: BindPort sets the secure port for the API - Server to bind to. Defaults to 6443. + description: |- + BindPort sets the secure port for the API Server to bind to. + Defaults to 6443. format: int32 type: integer required: @@ -691,52 +709,51 @@ spec: type: object type: object discovery: - description: 'Discovery specifies the options for the kubelet - to use during the TLS Bootstrap process TODO: revisit when there - is defaulting from k/k' + description: |- + Discovery specifies the options for the kubelet to use during the TLS Bootstrap process + TODO: revisit when there is defaulting from k/k properties: bootstrapToken: - description: BootstrapToken is used to set the options for - bootstrap token based discovery BootstrapToken and File - are mutually exclusive + description: |- + BootstrapToken is used to set the options for bootstrap token based discovery + BootstrapToken and File are mutually exclusive properties: apiServerEndpoint: description: APIServerEndpoint is an IP or domain name to the API server from which info will be fetched. type: string caCertHashes: - description: 'CACertHashes specifies a set of public key - pins to verify when token-based discovery is used.
The - root CA found during discovery must match one of these - values. Specifying an empty set disables root CA pinning, - which can be unsafe. Each hash is specified as ":", - where the only currently supported type is "sha256". - This is a hex-encoded SHA-256 hash of the Subject Public - Key Info (SPKI) object in DER-encoded ASN.1. These hashes - can be calculated using, for example, OpenSSL: openssl - x509 -pubkey -in ca.crt openssl rsa -pubin -outform - der 2>&/dev/null | openssl dgst -sha256 -hex' + description: |- + CACertHashes specifies a set of public key pins to verify + when token-based discovery is used. The root CA found during discovery + must match one of these values. Specifying an empty set disables root CA + pinning, which can be unsafe. Each hash is specified as "<type>:<value>", + where the only currently supported type is "sha256". This is a hex-encoded + SHA-256 hash of the Subject Public Key Info (SPKI) object in DER-encoded + ASN.1. These hashes can be calculated using, for example, OpenSSL: + openssl x509 -pubkey -in ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex items: type: string type: array token: - description: Token is a token used to validate cluster - information fetched from the control-plane. + description: |- + Token is a token used to validate cluster information + fetched from the control-plane. type: string unsafeSkipCAVerification: - description: UnsafeSkipCAVerification allows token-based - discovery without CA verification via CACertHashes. - This can weaken the security of kubeadm since other - nodes can impersonate the control-plane. + description: |- + UnsafeSkipCAVerification allows token-based discovery + without CA verification via CACertHashes. This can weaken + the security of kubeadm since other nodes can impersonate the control-plane. type: boolean required: - token - unsafeSkipCAVerification type: object file: - description: File is used to specify a file or URL to a kubeconfig - file from which to load cluster information BootstrapToken - and File are mutually exclusive + description: |- + File is used to specify a file or URL to a kubeconfig file from which to load cluster information + BootstrapToken and File are mutually exclusive properties: kubeConfigPath: description: KubeConfigPath is used to specify the actual @@ -750,25 +767,26 @@ spec: description: Timeout modifies the discovery timeout type: string tlsBootstrapToken: - description: 'TLSBootstrapToken is a token used for TLS bootstrapping. - If .BootstrapToken is set, this field is defaulted to .BootstrapToken.Token, - but can be overridden. If .File is set, this field **must - be set** in case the KubeConfigFile does not contain any - other authentication information TODO: revisit when there - is defaulting from k/k' + description: |- + TLSBootstrapToken is a token used for TLS bootstrapping. + If .BootstrapToken is set, this field is defaulted to .BootstrapToken.Token, but can be overridden. + If .File is set, this field **must be set** in case the KubeConfigFile does not contain any other authentication information + TODO: revisit when there is defaulting from k/k type: string type: object kind: - description: 'Kind is a string value representing the REST resource - this object represents. Servers may infer this from the endpoint - the client submits requests to. Cannot be updated. In CamelCase.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string nodeRegistration: - description: NodeRegistration holds fields that relate to registering - the new control-plane node to the cluster. When used in the - context of control plane nodes, NodeRegistration should remain - consistent across both InitConfiguration and JoinConfiguration + description: |- + NodeRegistration holds fields that relate to registering the new control-plane node to the cluster. + When used in the context of control plane nodes, NodeRegistration should remain consistent + across both InitConfiguration and JoinConfiguration properties: criSocket: description: CRISocket is used to retrieve container runtime @@ -778,47 +796,41 @@ spec: kubeletExtraArgs: additionalProperties: type: string - description: KubeletExtraArgs passes through extra arguments - to the kubelet. The arguments here are passed to the kubelet - command line via the environment file kubeadm writes at - runtime for the kubelet to source. This overrides the generic - base-level configuration in the kubelet-config-1.X ConfigMap - Flags have higher priority when parsing. These values are - local and specific to the node kubeadm is executing on. + description: |- + KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file + kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap + Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. type: object name: - description: Name is the `.Metadata.Name` field of the Node - API object that will be created in this `kubeadm init` or - `kubeadm join` operation. This field is also used in the - CommonName field of the kubelet's client certificate to - the API server. Defaults to the hostname of the node if - not provided. + description: |- + Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. + This field is also used in the CommonName field of the kubelet's client certificate to the API server. + Defaults to the hostname of the node if not provided. type: string taints: - description: 'Taints specifies the taints the Node API object - should be registered with. If this field is unset, i.e. - nil, in the `kubeadm init` process it will be defaulted - to []v1.Taint{''node-role.kubernetes.io/master=""''}. If - you don''t want to taint your control-plane node, set this - field to an empty slice, i.e. `taints: {}` in the YAML file. - This field is solely used for Node registration.' + description: |- + Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process + it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your control-plane node, set this field to an + empty slice, i.e. `taints: {}` in the YAML file. This field is solely used for Node registration. 
items: - description: The node this Taint is attached to has the - "effect" on any pod that does not tolerate the Taint. + description: |- + The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. properties: effect: - description: Required. The effect of the taint on pods - that do not tolerate the taint. Valid effects are - NoSchedule, PreferNoSchedule and NoExecute. + description: |- + Required. The effect of the taint on pods + that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. type: string key: description: Required. The taint key to be applied to a node. type: string timeAdded: - description: TimeAdded represents the time at which - the taint was added. It is only written for NoExecute - taints. + description: |- + TimeAdded represents the time at which the taint was added. + It is only written for NoExecute taints. format: date-time type: string value: @@ -865,13 +877,20 @@ spec: type: string type: array useExperimentalRetryJoin: - description: "UseExperimentalRetryJoin replaces a basic kubeadm command - with a shell script with retries for joins. \n This is meant to - be an experimental temporary workaround on some environments where - joins fail due to timing (and other issues). The long term goal - is to add retries to kubeadm proper and use that functionality. - \n This will add about 40KB to userdata \n For more information, - refer to https://github.com/kubernetes-sigs/cluster-api/pull/2763#discussion_r397306055." + description: |- + UseExperimentalRetryJoin replaces a basic kubeadm command with a shell + script with retries for joins. + + + This is meant to be an experimental temporary workaround on some environments + where joins fail due to timing (and other issues). The long term goal is to add retries to + kubeadm proper and use that functionality. + + + This will add about 40KB to userdata + + + For more information, refer to https://github.com/kubernetes-sigs/cluster-api/pull/2763#discussion_r397306055. type: boolean users: description: Users specifies extra users to add @@ -924,7 +943,8 @@ spec: type: object type: array verbosity: - description: Verbosity is the number for the kubeadm log level verbosity. + description: |- + Verbosity is the number for the kubeadm log level verbosity. It overrides the `--v` flag in kubeadm commands. format: int32 type: integer @@ -933,8 +953,11 @@ spec: description: KubeadmConfigStatus defines the observed state of KubeadmConfig. properties: bootstrapData: - description: "BootstrapData will be a cloud-init script for now. \n - Deprecated: Switch to DataSecretName." + description: |- + BootstrapData will be a cloud-init script for now. + + + Deprecated: Switch to DataSecretName. format: byte type: string conditions: @@ -944,37 +967,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. 
+ description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - status @@ -1002,7 +1025,7 @@ spec: type: boolean type: object type: object - served: true + served: false storage: false subresources: status: {} @@ -1011,28 +1034,37 @@ spec: jsonPath: .metadata.creationTimestamp name: Age type: date + deprecated: true name: v1alpha4 schema: openAPIV3Schema: - description: "KubeadmConfig is the Schema for the kubeadmconfigs API. \n Deprecated: - This type will be removed in one of the next releases." + description: |- + KubeadmConfig is the Schema for the kubeadmconfigs API. + + + Deprecated: This type will be removed in one of the next releases. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: KubeadmConfigSpec defines the desired state of KubeadmConfig. - Either ClusterConfiguration and InitConfiguration should be defined - or the JoinConfiguration should be defined. + description: |- + KubeadmConfigSpec defines the desired state of KubeadmConfig. + Either ClusterConfiguration and InitConfiguration should be defined or the JoinConfiguration should be defined. properties: clusterConfiguration: description: ClusterConfiguration along with InitConfiguration are @@ -1051,21 +1083,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags to pass to - the control plane component. TODO: This is temporary and - ideally we would like to switch all components to use ComponentConfig - + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements describing - volumes that are mounted from the host. + description: |- + HostPathMount contains elements describing volumes that are mounted from the + host. properties: hostPath: - description: HostPath is the path in the host that will - be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside the pod where @@ -1092,33 +1126,34 @@ spec: type: string type: object apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string certificatesDir: - description: 'CertificatesDir specifies where to store or look - for all required certificates. NB: if not provided, this will - default to `/etc/kubernetes/pki`' + description: |- + CertificatesDir specifies where to store or look for all required certificates. + NB: if not provided, this will default to `/etc/kubernetes/pki` type: string clusterName: description: The cluster name type: string controlPlaneEndpoint: - description: 'ControlPlaneEndpoint sets a stable IP address or - DNS name for the control plane; it can be a valid IP address - or a RFC-1123 DNS subdomain, both with optional TCP port. In - case the ControlPlaneEndpoint is not specified, the AdvertiseAddress - + BindPort are used; in case the ControlPlaneEndpoint is specified - but without a TCP port, the BindPort is used. Possible usages - are: e.g. In a cluster with more than one control plane instances, - this field should be assigned the address of the external load - balancer in front of the control plane instances. e.g. 
in environments - with enforced node recycling, the ControlPlaneEndpoint could - be used for assigning a stable DNS to the control plane. NB: - This value defaults to the first value in the Cluster object - status.apiEndpoints array.' + description: |- + ControlPlaneEndpoint sets a stable IP address or DNS name for the control plane; it + can be a valid IP address or an RFC-1123 DNS subdomain, both with optional TCP port. + In case the ControlPlaneEndpoint is not specified, the AdvertiseAddress + BindPort + are used; in case the ControlPlaneEndpoint is specified but without a TCP port, + the BindPort is used. + Possible usages are: + e.g. In a cluster with more than one control plane instance, this field should be + assigned the address of the external load balancer in front of the + control plane instances. + e.g. in environments with enforced node recycling, the ControlPlaneEndpoint + could be used for assigning a stable DNS to the control plane. + NB: This value defaults to the first value in the Cluster object status.apiEndpoints array. type: string controllerManager: description: ControllerManager contains extra settings for the @@ -1127,21 +1162,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags to pass to - the control plane component. TODO: This is temporary and - ideally we would like to switch all components to use ComponentConfig - + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements describing - volumes that are mounted from the host. + description: |- + HostPathMount contains elements describing volumes that are mounted from the + host. properties: hostPath: - description: HostPath is the path in the host that will - be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside the pod where @@ -1168,33 +1205,35 @@ spec: in the cluster. properties: imageRepository: - description: ImageRepository sets the container registry to - pull images from. if not set, the ImageRepository defined - in ClusterConfiguration will be used instead. + description: |- + ImageRepository sets the container registry to pull images from. + if not set, the ImageRepository defined in ClusterConfiguration will be used instead. type: string imageTag: - description: ImageTag allows to specify a tag for the image. - In case this value is set, kubeadm does not change automatically - the version of the above components during upgrades. + description: |- + ImageTag allows to specify a tag for the image. + In case this value is set, kubeadm does not change automatically the version of the above components during upgrades. type: string type: object etcd: - description: 'Etcd holds configuration for etcd. NB: This value - defaults to a Local (stacked) etcd' + description: |- + Etcd holds configuration for etcd.
+ NB: This value defaults to a Local (stacked) etcd properties: external: - description: External describes how to connect to an external - etcd cluster Local and External are mutually exclusive + description: |- + External describes how to connect to an external etcd cluster + Local and External are mutually exclusive properties: caFile: - description: CAFile is an SSL Certificate Authority file - used to secure etcd communication. Required if using - a TLS connection. + description: |- + CAFile is an SSL Certificate Authority file used to secure etcd communication. + Required if using a TLS connection. type: string certFile: - description: CertFile is an SSL certification file used - to secure etcd communication. Required if using a TLS - connection. + description: |- + CertFile is an SSL certification file used to secure etcd communication. + Required if using a TLS connection. type: string endpoints: description: Endpoints of etcd members. Required for ExternalEtcd. @@ -1202,8 +1241,9 @@ spec: type: string type: array keyFile: - description: KeyFile is an SSL key file used to secure - etcd communication. Required if using a TLS connection. + description: |- + KeyFile is an SSL key file used to secure etcd communication. + Required if using a TLS connection. type: string required: - caFile @@ -1212,30 +1252,31 @@ spec: - keyFile type: object local: - description: Local provides configuration knobs for configuring - the local etcd instance Local and External are mutually - exclusive + description: |- + Local provides configuration knobs for configuring the local etcd instance + Local and External are mutually exclusive properties: dataDir: - description: DataDir is the directory etcd will place - its data. Defaults to "/var/lib/etcd". + description: |- + DataDir is the directory etcd will place its data. + Defaults to "/var/lib/etcd". type: string extraArgs: additionalProperties: type: string - description: ExtraArgs are extra arguments provided to - the etcd binary when run inside a static pod. + description: |- + ExtraArgs are extra arguments provided to the etcd binary + when run inside a static pod. type: object imageRepository: - description: ImageRepository sets the container registry - to pull images from. if not set, the ImageRepository - defined in ClusterConfiguration will be used instead. + description: |- + ImageRepository sets the container registry to pull images from. + if not set, the ImageRepository defined in ClusterConfiguration will be used instead. type: string imageTag: - description: ImageTag allows to specify a tag for the - image. In case this value is set, kubeadm does not change - automatically the version of the above components during - upgrades. + description: |- + ImageTag allows to specify a tag for the image. + In case this value is set, kubeadm does not change automatically the version of the above components during upgrades. type: string peerCertSANs: description: PeerCertSANs sets extra Subject Alternative @@ -1257,44 +1298,45 @@ spec: description: FeatureGates enabled by the user. type: object imageRepository: - description: ImageRepository sets the container registry to pull - images from. If empty, `registry.k8s.io` will be used by default; - in case of kubernetes version is a CI build (kubernetes version - starts with `ci/` or `ci-cross/`) `gcr.io/k8s-staging-ci-images` - will be used as a default for control plane components and for - kube-proxy, while `registry.k8s.io` will be used for all the - other images. 
+ description: |- + ImageRepository sets the container registry to pull images from. + If empty, `registry.k8s.io` will be used by default; in case the kubernetes version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`) + `gcr.io/k8s-staging-ci-images` will be used as a default for control plane components and for kube-proxy, while `registry.k8s.io` + will be used for all the other images. type: string kind: - description: 'Kind is a string value representing the REST resource - this object represents. Servers may infer this from the endpoint - the client submits requests to. Cannot be updated. In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string kubernetesVersion: - description: 'KubernetesVersion is the target version of the control - plane. NB: This value defaults to the Machine object spec.version' + description: |- + KubernetesVersion is the target version of the control plane. + NB: This value defaults to the Machine object spec.version type: string networking: - description: 'Networking holds configuration for the networking - topology of the cluster. NB: This value defaults to the Cluster - object spec.clusterNetwork.' + description: |- + Networking holds configuration for the networking topology of the cluster. + NB: This value defaults to the Cluster object spec.clusterNetwork. properties: dnsDomain: description: DNSDomain is the dns domain used by k8s services. Defaults to "cluster.local". type: string podSubnet: - description: PodSubnet is the subnet used by pods. If unset, - the API server will not allocate CIDR ranges for every node. - Defaults to a comma-delimited string of the Cluster object's - spec.clusterNetwork.services.cidrBlocks if that is set + description: |- + PodSubnet is the subnet used by pods. + If unset, the API server will not allocate CIDR ranges for every node. + Defaults to a comma-delimited string of the Cluster object's spec.clusterNetwork.pods.cidrBlocks if that is set type: string serviceSubnet: - description: ServiceSubnet is the subnet used by k8s services. - Defaults to a comma-delimited string of the Cluster object's - spec.clusterNetwork.pods.cidrBlocks, or to "10.96.0.0/12" - if that's unset. + description: |- + ServiceSubnet is the subnet used by k8s services. + Defaults to a comma-delimited string of the Cluster object's spec.clusterNetwork.services.cidrBlocks, or + to "10.96.0.0/12" if that's unset. type: string type: object scheduler: @@ -1304,21 +1346,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags to pass to - the control plane component. TODO: This is temporary and - ideally we would like to switch all components to use ComponentConfig - + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component.
items: - description: HostPathMount contains elements describing - volumes that are mounted from the host. + description: |- + HostPathMount contains elements describing volumes that are mounted from the + host. properties: hostPath: - description: HostPath is the path in the host that will - be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside the pod where @@ -1368,9 +1412,9 @@ spec: used. If set to None, no label is used. type: string overwrite: - description: Overwrite defines whether or not to overwrite - any existing filesystem. If true, any pre-existing file - system will be destroyed. Use with Caution. + description: |- + Overwrite defines whether or not to overwrite any existing filesystem. + If true, any pre-existing file system will be destroyed. Use with Caution. type: boolean partition: description: 'Partition specifies the partition to use. @@ -1378,10 +1422,9 @@ spec: and , where NUM is the actual partition number.' type: string replaceFS: - description: 'ReplaceFS is a special directive, used for - Microsoft Azure that instructs cloud-init to replace a - file system of . NOTE: unless you define a label, - this requires the use of the ''any'' partition directive.' + description: |- + ReplaceFS is a special directive, used for Microsoft Azure that instructs cloud-init to replace a file system of . + NOTE: unless you define a label, this requires the use of the 'any' partition directive. type: string required: - device @@ -1399,21 +1442,21 @@ spec: description: Device is the name of the device. type: string layout: - description: Layout specifies the device layout. If it is - true, a single partition will be created for the entire - device. When layout is false, it means don't partition - or ignore existing partitioning. + description: |- + Layout specifies the device layout. + If it is true, a single partition will be created for the entire device. + When layout is false, it means don't partition or ignore existing partitioning. type: boolean overwrite: - description: Overwrite describes whether to skip checks - and create the partition if a partition or filesystem - is found on the device. Use with caution. Default is 'false'. + description: |- + Overwrite describes whether to skip checks and create the partition if a partition or filesystem is found on the device. + Use with caution. Default is 'false'. type: boolean tableType: - description: 'TableType specifies the tupe of partition - table. The following are supported: ''mbr'': default and - setups a MS-DOS partition table ''gpt'': setups a GPT - partition table' + description: |- + TableType specifies the type of partition table. The following are supported: + 'mbr': default and sets up an MS-DOS partition table + 'gpt': sets up a GPT partition table type: string required: - device @@ -1487,50 +1530,52 @@ spec: the configurations necessary for the init command properties: apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string bootstrapTokens: - description: BootstrapTokens is respected at `kubeadm init` time - and describes a set of Bootstrap Tokens to create. This information - IS NOT uploaded to the kubeadm cluster configmap, partly because - of its sensitive nature + description: |- + BootstrapTokens is respected at `kubeadm init` time and describes a set of Bootstrap Tokens to create. + This information IS NOT uploaded to the kubeadm cluster configmap, partly because of its sensitive nature items: description: BootstrapToken describes one bootstrap token, stored as a Secret in the cluster. properties: description: - description: Description sets a human-friendly message why - this token exists and what it's used for, so other administrators - can know its purpose. + description: |- + Description sets a human-friendly message why this token exists and what it's used + for, so other administrators can know its purpose. type: string expires: - description: Expires specifies the timestamp when this token - expires. Defaults to being set dynamically at runtime - based on the TTL. Expires and TTL are mutually exclusive. + description: |- + Expires specifies the timestamp when this token expires. Defaults to being set + dynamically at runtime based on the TTL. Expires and TTL are mutually exclusive. format: date-time type: string groups: - description: Groups specifies the extra groups that this - token will authenticate as when/if used for authentication + description: |- + Groups specifies the extra groups that this token will authenticate as when/if + used for authentication items: type: string type: array token: - description: Token is used for establishing bidirectional - trust between nodes and control-planes. Used for joining - nodes in the cluster. + description: |- + Token is used for establishing bidirectional trust between nodes and control-planes. + Used for joining nodes in the cluster. type: string ttl: - description: TTL defines the time to live for this token. - Defaults to 24h. Expires and TTL are mutually exclusive. + description: |- + TTL defines the time to live for this token. Defaults to 24h. + Expires and TTL are mutually exclusive. type: string usages: - description: Usages describes the ways in which this token - can be used. Can by default be used for establishing bidirectional - trust, but that can be changed here. + description: |- + Usages describes the ways in which this token can be used. Can by default be used + for establishing bidirectional trust, but that can be changed here. items: type: string type: array @@ -1539,21 +1584,20 @@ spec: type: object type: array kind: - description: 'Kind is a string value representing the REST resource - this object represents. Servers may infer this from the endpoint - the client submits requests to. Cannot be updated. In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string localAPIEndpoint: - description: LocalAPIEndpoint represents the endpoint of the API - server instance that's deployed on this control plane node In - HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint - in the sense that ControlPlaneEndpoint is the global endpoint - for the cluster, which then loadbalances the requests to each - individual API server. This configuration object lets you customize - what IP/DNS name and port the local API server advertises it's - accessible on. By default, kubeadm tries to auto-detect the - IP of the default interface and use that, but in case that process + description: |- + LocalAPIEndpoint represents the endpoint of the API server instance that's deployed on this control plane node + In HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint in the sense that ControlPlaneEndpoint + is the global endpoint for the cluster, which then loadbalances the requests to each individual API server. This + configuration object lets you customize what IP/DNS name and port the local API server advertises it's accessible + on. By default, kubeadm tries to auto-detect the IP of the default interface and use that, but in case that process fails you may set the desired value here. properties: advertiseAddress: @@ -1561,16 +1605,17 @@ spec: API server to advertise. type: string bindPort: - description: BindPort sets the secure port for the API Server - to bind to. Defaults to 6443. + description: |- + BindPort sets the secure port for the API Server to bind to. + Defaults to 6443. format: int32 type: integer type: object nodeRegistration: - description: NodeRegistration holds fields that relate to registering - the new control-plane node to the cluster. When used in the - context of control plane nodes, NodeRegistration should remain - consistent across both InitConfiguration and JoinConfiguration + description: |- + NodeRegistration holds fields that relate to registering the new control-plane node to the cluster. + When used in the context of control plane nodes, NodeRegistration should remain consistent + across both InitConfiguration and JoinConfiguration properties: criSocket: description: CRISocket is used to retrieve container runtime @@ -1586,47 +1631,41 @@ spec: kubeletExtraArgs: additionalProperties: type: string - description: KubeletExtraArgs passes through extra arguments - to the kubelet. The arguments here are passed to the kubelet - command line via the environment file kubeadm writes at - runtime for the kubelet to source. This overrides the generic - base-level configuration in the kubelet-config-1.X ConfigMap - Flags have higher priority when parsing. These values are - local and specific to the node kubeadm is executing on. + description: |- + KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file + kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap + Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. type: object name: - description: Name is the `.Metadata.Name` field of the Node - API object that will be created in this `kubeadm init` or - `kubeadm join` operation. 
This field is also used in the - CommonName field of the kubelet's client certificate to - the API server. Defaults to the hostname of the node if - not provided. + description: |- + Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. + This field is also used in the CommonName field of the kubelet's client certificate to the API server. + Defaults to the hostname of the node if not provided. type: string taints: - description: 'Taints specifies the taints the Node API object - should be registered with. If this field is unset, i.e. - nil, in the `kubeadm init` process it will be defaulted - to []v1.Taint{''node-role.kubernetes.io/master=""''}. If - you don''t want to taint your control-plane node, set this - field to an empty slice, i.e. `taints: {}` in the YAML file. - This field is solely used for Node registration.' + description: |- + Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process + it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your control-plane node, set this field to an + empty slice, i.e. `taints: []` in the YAML file. This field is solely used for Node registration. items: - description: The node this Taint is attached to has the - "effect" on any pod that does not tolerate the Taint. + description: |- + The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. properties: effect: - description: Required. The effect of the taint on pods - that do not tolerate the taint. Valid effects are - NoSchedule, PreferNoSchedule and NoExecute. + description: |- + Required. The effect of the taint on pods + that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. type: string key: description: Required. The taint key to be applied to a node. type: string timeAdded: - description: TimeAdded represents the time at which - the taint was added. It is only written for NoExecute - taints. + description: |- + TimeAdded represents the time at which the taint was added. + It is only written for NoExecute taints. format: date-time type: string value: @@ -1645,21 +1684,23 @@ spec: join command properties: apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string caCertPath: - description: 'CACertPath is the path to the SSL certificate authority - used to secure comunications between node and control-plane. - Defaults to "/etc/kubernetes/pki/ca.crt". TODO: revisit when - there is defaulting from k/k' + description: |- + CACertPath is the path to the SSL certificate authority used to + secure communications between node and control-plane. + Defaults to "/etc/kubernetes/pki/ca.crt".
+ TODO: revisit when there is defaulting from k/k type: string controlPlane: - description: ControlPlane defines the additional control plane - instance to be deployed on the joining node. If nil, no additional - control plane instance will be deployed. + description: |- + ControlPlane defines the additional control plane instance to be deployed on the joining node. + If nil, no additional control plane instance will be deployed. properties: localAPIEndpoint: description: LocalAPIEndpoint represents the endpoint of the @@ -1670,58 +1711,58 @@ spec: the API server to advertise. type: string bindPort: - description: BindPort sets the secure port for the API - Server to bind to. Defaults to 6443. + description: |- + BindPort sets the secure port for the API Server to bind to. + Defaults to 6443. format: int32 type: integer type: object type: object discovery: - description: 'Discovery specifies the options for the kubelet - to use during the TLS Bootstrap process TODO: revisit when there - is defaulting from k/k' + description: |- + Discovery specifies the options for the kubelet to use during the TLS Bootstrap process + TODO: revisit when there is defaulting from k/k properties: bootstrapToken: - description: BootstrapToken is used to set the options for - bootstrap token based discovery BootstrapToken and File - are mutually exclusive + description: |- + BootstrapToken is used to set the options for bootstrap token based discovery + BootstrapToken and File are mutually exclusive properties: apiServerEndpoint: description: APIServerEndpoint is an IP or domain name to the API server from which info will be fetched. type: string caCertHashes: - description: 'CACertHashes specifies a set of public key - pins to verify when token-based discovery is used. The - root CA found during discovery must match one of these - values. Specifying an empty set disables root CA pinning, - which can be unsafe. Each hash is specified as "<type>:<value>", - where the only currently supported type is "sha256". - This is a hex-encoded SHA-256 hash of the Subject Public - Key Info (SPKI) object in DER-encoded ASN.1. These hashes - can be calculated using, for example, OpenSSL: openssl - x509 -pubkey -in ca.crt openssl rsa -pubin -outform - der 2>&/dev/null | openssl dgst -sha256 -hex' + description: |- + CACertHashes specifies a set of public key pins to verify + when token-based discovery is used. The root CA found during discovery + must match one of these values. Specifying an empty set disables root CA + pinning, which can be unsafe. Each hash is specified as "<type>:<value>", + where the only currently supported type is "sha256". This is a hex-encoded + SHA-256 hash of the Subject Public Key Info (SPKI) object in DER-encoded + ASN.1. These hashes can be calculated using, for example, OpenSSL: + openssl x509 -pubkey -in ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex items: type: string type: array token: - description: Token is a token used to validate cluster - information fetched from the control-plane. + description: |- + Token is a token used to validate cluster information + fetched from the control-plane. type: string unsafeSkipCAVerification: - description: UnsafeSkipCAVerification allows token-based - discovery without CA verification via CACertHashes.
This can weaken + the security of kubeadm since other nodes can impersonate the control-plane. type: boolean required: - token type: object file: - description: File is used to specify a file or URL to a kubeconfig - file from which to load cluster information BootstrapToken - and File are mutually exclusive + description: |- + File is used to specify a file or URL to a kubeconfig file from which to load cluster information + BootstrapToken and File are mutually exclusive properties: kubeConfigPath: description: KubeConfigPath is used to specify the actual @@ -1735,24 +1776,25 @@ spec: description: Timeout modifies the discovery timeout type: string tlsBootstrapToken: - description: TLSBootstrapToken is a token used for TLS bootstrapping. - If .BootstrapToken is set, this field is defaulted to .BootstrapToken.Token, - but can be overridden. If .File is set, this field **must - be set** in case the KubeConfigFile does not contain any - other authentication information + description: |- + TLSBootstrapToken is a token used for TLS bootstrapping. + If .BootstrapToken is set, this field is defaulted to .BootstrapToken.Token, but can be overridden. + If .File is set, this field **must be set** in case the KubeConfigFile does not contain any other authentication information type: string type: object kind: - description: 'Kind is a string value representing the REST resource - this object represents. Servers may infer this from the endpoint - the client submits requests to. Cannot be updated. In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string nodeRegistration: - description: NodeRegistration holds fields that relate to registering - the new control-plane node to the cluster. When used in the - context of control plane nodes, NodeRegistration should remain - consistent across both InitConfiguration and JoinConfiguration + description: |- + NodeRegistration holds fields that relate to registering the new control-plane node to the cluster. + When used in the context of control plane nodes, NodeRegistration should remain consistent + across both InitConfiguration and JoinConfiguration properties: criSocket: description: CRISocket is used to retrieve container runtime @@ -1768,47 +1810,41 @@ spec: kubeletExtraArgs: additionalProperties: type: string - description: KubeletExtraArgs passes through extra arguments - to the kubelet. The arguments here are passed to the kubelet - command line via the environment file kubeadm writes at - runtime for the kubelet to source. This overrides the generic - base-level configuration in the kubelet-config-1.X ConfigMap - Flags have higher priority when parsing. These values are - local and specific to the node kubeadm is executing on. + description: |- + KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file + kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap + Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. 
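# Illustrative sketch, not generated CRD content: a minimal KubeadmConfig
# exercising the nodeRegistration.kubeletExtraArgs field described above.
# The metadata name and flag values here are assumptions for illustration.
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfig
metadata:
  name: worker-bootstrap            # hypothetical name
spec:
  joinConfiguration:
    nodeRegistration:
      kubeletExtraArgs:             # sourced by the kubelet via the kubeadm-written env file
        cloud-provider: external
        node-labels: "pool=workers" # hypothetical label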
type: object name: - description: Name is the `.Metadata.Name` field of the Node - API object that will be created in this `kubeadm init` or - `kubeadm join` operation. This field is also used in the - CommonName field of the kubelet's client certificate to - the API server. Defaults to the hostname of the node if - not provided. + description: |- + Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. + This field is also used in the CommonName field of the kubelet's client certificate to the API server. + Defaults to the hostname of the node if not provided. type: string taints: - description: 'Taints specifies the taints the Node API object - should be registered with. If this field is unset, i.e. - nil, in the `kubeadm init` process it will be defaulted - to []v1.Taint{''node-role.kubernetes.io/master=""''}. If - you don''t want to taint your control-plane node, set this - field to an empty slice, i.e. `taints: {}` in the YAML file. - This field is solely used for Node registration.' + description: |- + Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process + it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your control-plane node, set this field to an + empty slice, i.e. `taints: []` in the YAML file. This field is solely used for Node registration. items: - description: The node this Taint is attached to has the - "effect" on any pod that does not tolerate the Taint. + description: |- + The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. properties: effect: - description: Required. The effect of the taint on pods - that do not tolerate the taint. Valid effects are - NoSchedule, PreferNoSchedule and NoExecute. + description: |- + Required. The effect of the taint on pods + that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. type: string key: description: Required. The taint key to be applied to a node. type: string timeAdded: - description: TimeAdded represents the time at which - the taint was added. It is only written for NoExecute - taints. + description: |- + TimeAdded represents the time at which the taint was added. + It is only written for NoExecute taints. format: date-time type: string value: @@ -1855,13 +1891,20 @@ spec: type: string type: array useExperimentalRetryJoin: - description: "UseExperimentalRetryJoin replaces a basic kubeadm command - with a shell script with retries for joins. \n This is meant to - be an experimental temporary workaround on some environments where - joins fail due to timing (and other issues). The long term goal - is to add retries to kubeadm proper and use that functionality. - \n This will add about 40KB to userdata \n For more information, - refer to https://github.com/kubernetes-sigs/cluster-api/pull/2763#discussion_r397306055." + description: |- + UseExperimentalRetryJoin replaces a basic kubeadm command with a shell + script with retries for joins. + + + This is meant to be an experimental temporary workaround on some environments + where joins fail due to timing (and other issues). The long term goal is to add retries to + kubeadm proper and use that functionality. + + + This will add about 40KB to userdata + + + For more information, refer to https://github.com/kubernetes-sigs/cluster-api/pull/2763#discussion_r397306055.
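# Illustrative sketch, not generated CRD content: a KubeadmConfig spec fragment
# leaving the control-plane node untainted via the Taints field documented
# above. In YAML an empty slice is written []; omitting the field (nil) yields
# the kubeadm default taint instead.
spec:
  initConfiguration:
    nodeRegistration:
      taints: []   # do not apply node-role.kubernetes.io/master=""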
type: boolean users: description: Users specifies extra users to add @@ -1914,7 +1957,8 @@ spec: type: object type: array verbosity: - description: Verbosity is the number for the kubeadm log level verbosity. + description: |- + Verbosity is the number for the kubeadm log level verbosity. It overrides the `--v` flag in kubeadm commands. format: int32 type: integer @@ -1929,37 +1973,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - status @@ -1987,7 +2031,7 @@ spec: type: boolean type: object type: object - served: true + served: false storage: false subresources: status: {} @@ -2006,21 +2050,26 @@ spec: description: KubeadmConfig is the Schema for the kubeadmconfigs API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: KubeadmConfigSpec defines the desired state of KubeadmConfig. - Either ClusterConfiguration and InitConfiguration should be defined - or the JoinConfiguration should be defined. + description: |- + KubeadmConfigSpec defines the desired state of KubeadmConfig. + Either ClusterConfiguration and InitConfiguration should be defined or the JoinConfiguration should be defined. properties: clusterConfiguration: description: ClusterConfiguration along with InitConfiguration are @@ -2039,21 +2088,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags to pass to - the control plane component. TODO: This is temporary and - ideally we would like to switch all components to use ComponentConfig - + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements describing - volumes that are mounted from the host. + description: |- + HostPathMount contains elements describing volumes that are mounted from the + host. properties: hostPath: - description: HostPath is the path in the host that will - be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside the pod where @@ -2080,33 +2131,34 @@ spec: type: string type: object apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string certificatesDir: - description: 'CertificatesDir specifies where to store or look - for all required certificates. NB: if not provided, this will - default to `/etc/kubernetes/pki`' + description: |- + CertificatesDir specifies where to store or look for all required certificates. 
+ NB: if not provided, this will default to `/etc/kubernetes/pki` type: string clusterName: description: The cluster name type: string controlPlaneEndpoint: - description: 'ControlPlaneEndpoint sets a stable IP address or - DNS name for the control plane; it can be a valid IP address - or a RFC-1123 DNS subdomain, both with optional TCP port. In - case the ControlPlaneEndpoint is not specified, the AdvertiseAddress - + BindPort are used; in case the ControlPlaneEndpoint is specified - but without a TCP port, the BindPort is used. Possible usages - are: e.g. In a cluster with more than one control plane instances, - this field should be assigned the address of the external load - balancer in front of the control plane instances. e.g. in environments - with enforced node recycling, the ControlPlaneEndpoint could - be used for assigning a stable DNS to the control plane. NB: - This value defaults to the first value in the Cluster object - status.apiEndpoints array.' + description: |- + ControlPlaneEndpoint sets a stable IP address or DNS name for the control plane; it + can be a valid IP address or a RFC-1123 DNS subdomain, both with optional TCP port. + In case the ControlPlaneEndpoint is not specified, the AdvertiseAddress + BindPort + are used; in case the ControlPlaneEndpoint is specified but without a TCP port, + the BindPort is used. + Possible usages are: + e.g. In a cluster with more than one control plane instances, this field should be + assigned the address of the external load balancer in front of the + control plane instances. + e.g. in environments with enforced node recycling, the ControlPlaneEndpoint + could be used for assigning a stable DNS to the control plane. + NB: This value defaults to the first value in the Cluster object status.apiEndpoints array. type: string controllerManager: description: ControllerManager contains extra settings for the @@ -2115,21 +2167,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags to pass to - the control plane component. TODO: This is temporary and - ideally we would like to switch all components to use ComponentConfig - + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements describing - volumes that are mounted from the host. + description: |- + HostPathMount contains elements describing volumes that are mounted from the + host. properties: hostPath: - description: HostPath is the path in the host that will - be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside the pod where @@ -2156,33 +2210,35 @@ spec: in the cluster. properties: imageRepository: - description: ImageRepository sets the container registry to - pull images from. if not set, the ImageRepository defined - in ClusterConfiguration will be used instead. + description: |- + ImageRepository sets the container registry to pull images from. + if not set, the ImageRepository defined in ClusterConfiguration will be used instead. type: string imageTag: - description: ImageTag allows to specify a tag for the image. 
- In case this value is set, kubeadm does not change automatically - the version of the above components during upgrades. + description: |- + ImageTag allows to specify a tag for the image. + In case this value is set, kubeadm does not change automatically the version of the above components during upgrades. type: string type: object etcd: - description: 'Etcd holds configuration for etcd. NB: This value - defaults to a Local (stacked) etcd' + description: |- + Etcd holds configuration for etcd. + NB: This value defaults to a Local (stacked) etcd properties: external: - description: External describes how to connect to an external - etcd cluster Local and External are mutually exclusive + description: |- + External describes how to connect to an external etcd cluster + Local and External are mutually exclusive properties: caFile: - description: CAFile is an SSL Certificate Authority file - used to secure etcd communication. Required if using - a TLS connection. + description: |- + CAFile is an SSL Certificate Authority file used to secure etcd communication. + Required if using a TLS connection. type: string certFile: - description: CertFile is an SSL certification file used - to secure etcd communication. Required if using a TLS - connection. + description: |- + CertFile is an SSL certification file used to secure etcd communication. + Required if using a TLS connection. type: string endpoints: description: Endpoints of etcd members. Required for ExternalEtcd. @@ -2190,8 +2246,9 @@ spec: type: string type: array keyFile: - description: KeyFile is an SSL key file used to secure - etcd communication. Required if using a TLS connection. + description: |- + KeyFile is an SSL key file used to secure etcd communication. + Required if using a TLS connection. type: string required: - caFile @@ -2200,30 +2257,31 @@ spec: - keyFile type: object local: - description: Local provides configuration knobs for configuring - the local etcd instance Local and External are mutually - exclusive + description: |- + Local provides configuration knobs for configuring the local etcd instance + Local and External are mutually exclusive properties: dataDir: - description: DataDir is the directory etcd will place - its data. Defaults to "/var/lib/etcd". + description: |- + DataDir is the directory etcd will place its data. + Defaults to "/var/lib/etcd". type: string extraArgs: additionalProperties: type: string - description: ExtraArgs are extra arguments provided to - the etcd binary when run inside a static pod. + description: |- + ExtraArgs are extra arguments provided to the etcd binary + when run inside a static pod. type: object imageRepository: - description: ImageRepository sets the container registry - to pull images from. if not set, the ImageRepository - defined in ClusterConfiguration will be used instead. + description: |- + ImageRepository sets the container registry to pull images from. + if not set, the ImageRepository defined in ClusterConfiguration will be used instead. type: string imageTag: - description: ImageTag allows to specify a tag for the - image. In case this value is set, kubeadm does not change - automatically the version of the above components during - upgrades. + description: |- + ImageTag allows to specify a tag for the image. + In case this value is set, kubeadm does not change automatically the version of the above components during upgrades. 
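# Illustrative sketch, not generated CRD content: a ClusterConfiguration
# fragment selecting an external etcd cluster per the Local/External
# descriptions above (the two are mutually exclusive). The endpoint address is
# an assumption; the file paths follow the kubeadm defaults named in this schema.
clusterConfiguration:
  etcd:
    external:
      endpoints:
        - https://10.10.0.11:2379   # hypothetical etcd member
      caFile: /etc/kubernetes/pki/etcd/ca.crt
      certFile: /etc/kubernetes/pki/apiserver-etcd-client.crt
      keyFile: /etc/kubernetes/pki/apiserver-etcd-client.key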
type: string peerCertSANs: description: PeerCertSANs sets extra Subject Alternative @@ -2245,50 +2303,52 @@ spec: description: FeatureGates enabled by the user. type: object imageRepository: - description: 'ImageRepository sets the container registry to pull - images from. * If not set, the default registry of kubeadm will - be used, i.e. * registry.k8s.io (new registry): >= v1.22.17, - >= v1.23.15, >= v1.24.9, >= v1.25.0 * k8s.gcr.io (old registry): - all older versions Please note that when imageRepository is - not set we don''t allow upgrades to versions >= v1.22.0 which - use the old registry (k8s.gcr.io). Please use a newer patch - version with the new registry instead (i.e. >= v1.22.17, >= - v1.23.15, >= v1.24.9, >= v1.25.0). * If the version is a CI - build (kubernetes version starts with `ci/` or `ci-cross/`) - `gcr.io/k8s-staging-ci-images` will be used as a default for - control plane components and for kube-proxy, while `registry.k8s.io` - will be used for all the other images.' + description: |- + ImageRepository sets the container registry to pull images from. + * If not set, the default registry of kubeadm will be used, i.e. + * registry.k8s.io (new registry): >= v1.22.17, >= v1.23.15, >= v1.24.9, >= v1.25.0 + * k8s.gcr.io (old registry): all older versions + Please note that when imageRepository is not set we don't allow upgrades to + versions >= v1.22.0 which use the old registry (k8s.gcr.io). Please use + a newer patch version with the new registry instead (i.e. >= v1.22.17, + >= v1.23.15, >= v1.24.9, >= v1.25.0). + * If the version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`) + `gcr.io/k8s-staging-ci-images` will be used as a default for control plane components + and for kube-proxy, while `registry.k8s.io` will be used for all the other images. type: string kind: - description: 'Kind is a string value representing the REST resource - this object represents. Servers may infer this from the endpoint - the client submits requests to. Cannot be updated. In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string kubernetesVersion: - description: 'KubernetesVersion is the target version of the control - plane. NB: This value defaults to the Machine object spec.version' + description: |- + KubernetesVersion is the target version of the control plane. + NB: This value defaults to the Machine object spec.version type: string networking: - description: 'Networking holds configuration for the networking - topology of the cluster. NB: This value defaults to the Cluster - object spec.clusterNetwork.' + description: |- + Networking holds configuration for the networking topology of the cluster. + NB: This value defaults to the Cluster object spec.clusterNetwork. properties: dnsDomain: description: DNSDomain is the dns domain used by k8s services. Defaults to "cluster.local". type: string podSubnet: - description: PodSubnet is the subnet used by pods. If unset, - the API server will not allocate CIDR ranges for every node. 
- Defaults to a comma-delimited string of the Cluster object's - spec.clusterNetwork.services.cidrBlocks if that is set + description: |- + PodSubnet is the subnet used by pods. + If unset, the API server will not allocate CIDR ranges for every node. + Defaults to a comma-delimited string of the Cluster object's spec.clusterNetwork.pods.cidrBlocks if that is set type: string serviceSubnet: - description: ServiceSubnet is the subnet used by k8s services. - Defaults to a comma-delimited string of the Cluster object's - spec.clusterNetwork.pods.cidrBlocks, or to "10.96.0.0/12" - if that's unset. + description: |- + ServiceSubnet is the subnet used by k8s services. + Defaults to a comma-delimited string of the Cluster object's spec.clusterNetwork.services.cidrBlocks, or + to "10.96.0.0/12" if that's unset. type: string type: object scheduler: @@ -2298,21 +2358,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags to pass to - the control plane component. TODO: This is temporary and - ideally we would like to switch all components to use ComponentConfig - + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements describing - volumes that are mounted from the host. + description: |- + HostPathMount contains elements describing volumes that are mounted from the + host. properties: hostPath: - description: HostPath is the path in the host that will - be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside the pod where @@ -2362,9 +2424,9 @@ spec: used. If set to None, no label is used. type: string overwrite: - description: Overwrite defines whether or not to overwrite - any existing filesystem. If true, any pre-existing file - system will be destroyed. Use with Caution. + description: |- + Overwrite defines whether or not to overwrite any existing filesystem. + If true, any pre-existing file system will be destroyed. Use with Caution. type: boolean partition: description: 'Partition specifies the partition to use. and <NUM>, where NUM is the actual partition number.' type: string replaceFS: - description: 'ReplaceFS is a special directive, used for - Microsoft Azure that instructs cloud-init to replace a - file system of <FS_TYPE>. NOTE: unless you define a label, - this requires the use of the ''any'' partition directive.' + description: |- + ReplaceFS is a special directive, used for Microsoft Azure that instructs cloud-init to replace a file system of <FS_TYPE>. + NOTE: unless you define a label, this requires the use of the 'any' partition directive. type: string required: - device @@ -2393,21 +2454,21 @@ spec: description: Device is the name of the device. type: string layout: - description: Layout specifies the device layout. If it is - true, a single partition will be created for the entire - device. When layout is false, it means don't partition - or ignore existing partitioning. + description: |- + Layout specifies the device layout. + If it is true, a single partition will be created for the entire device.
+ When layout is false, it means don't partition or ignore existing partitioning. type: boolean overwrite: - description: Overwrite describes whether to skip checks - and create the partition if a partition or filesystem - is found on the device. Use with caution. Default is 'false'. + description: |- + Overwrite describes whether to skip checks and create the partition if a partition or filesystem is found on the device. + Use with caution. Default is 'false'. type: boolean tableType: - description: 'TableType specifies the tupe of partition - table. The following are supported: ''mbr'': default and - setups a MS-DOS partition table ''gpt'': setups a GPT - partition table' + description: |- + TableType specifies the type of partition table. The following are supported: + 'mbr': default and sets up an MS-DOS partition table + 'gpt': sets up a GPT partition table type: string required: - device @@ -2488,10 +2549,12 @@ spec: description: ContainerLinuxConfig contains CLC specific configuration. properties: additionalConfig: - description: "AdditionalConfig contains additional configuration - to be merged with the Ignition configuration generated by - the bootstrapper controller. More info: https://coreos.github.io/ignition/operator-notes/#config-merging - \n The data format is documented here: https://kinvolk.io/docs/flatcar-container-linux/latest/provisioning/cl-config/" + description: |- + AdditionalConfig contains additional configuration to be merged with the Ignition + configuration generated by the bootstrapper controller. More info: https://coreos.github.io/ignition/operator-notes/#config-merging + + + The data format is documented here: https://kinvolk.io/docs/flatcar-container-linux/latest/provisioning/cl-config/ type: string strict: description: Strict controls if AdditionalConfig should be @@ -2504,50 +2567,52 @@ spec: the configurations necessary for the init command properties: apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string bootstrapTokens: - description: BootstrapTokens is respected at `kubeadm init` time - and describes a set of Bootstrap Tokens to create. This information - IS NOT uploaded to the kubeadm cluster configmap, partly because - of its sensitive nature + description: |- + BootstrapTokens is respected at `kubeadm init` time and describes a set of Bootstrap Tokens to create. + This information IS NOT uploaded to the kubeadm cluster configmap, partly because of its sensitive nature items: description: BootstrapToken describes one bootstrap token, stored as a Secret in the cluster. properties: description: - description: Description sets a human-friendly message why - this token exists and what it's used for, so other administrators - can know its purpose. + description: |- + Description sets a human-friendly message why this token exists and what it's used + for, so other administrators can know its purpose.
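# Illustrative sketch, not generated CRD content: one BootstrapToken entry
# using the BootstrapToken fields in this schema. TTL and Expires are mutually
# exclusive, so only ttl is set; the token value shows the expected format and
# is not a real secret.
bootstrapTokens:
  - token: abcdef.0123456789abcdef
    description: "token for joining the first worker pool"
    ttl: 24h
    usages:
      - signing
      - authentication
    groups:
      - system:bootstrappers:kubeadm:default-node-token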
type: string expires: - description: Expires specifies the timestamp when this token - expires. Defaults to being set dynamically at runtime - based on the TTL. Expires and TTL are mutually exclusive. + description: |- + Expires specifies the timestamp when this token expires. Defaults to being set + dynamically at runtime based on the TTL. Expires and TTL are mutually exclusive. format: date-time type: string groups: - description: Groups specifies the extra groups that this - token will authenticate as when/if used for authentication + description: |- + Groups specifies the extra groups that this token will authenticate as when/if + used for authentication items: type: string type: array token: - description: Token is used for establishing bidirectional - trust between nodes and control-planes. Used for joining - nodes in the cluster. + description: |- + Token is used for establishing bidirectional trust between nodes and control-planes. + Used for joining nodes in the cluster. type: string ttl: - description: TTL defines the time to live for this token. - Defaults to 24h. Expires and TTL are mutually exclusive. + description: |- + TTL defines the time to live for this token. Defaults to 24h. + Expires and TTL are mutually exclusive. type: string usages: - description: Usages describes the ways in which this token - can be used. Can by default be used for establishing bidirectional - trust, but that can be changed here. + description: |- + Usages describes the ways in which this token can be used. Can by default be used + for establishing bidirectional trust, but that can be changed here. items: type: string type: array @@ -2556,21 +2621,20 @@ spec: type: object type: array kind: - description: 'Kind is a string value representing the REST resource - this object represents. Servers may infer this from the endpoint - the client submits requests to. Cannot be updated. In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string localAPIEndpoint: - description: LocalAPIEndpoint represents the endpoint of the API - server instance that's deployed on this control plane node In - HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint - in the sense that ControlPlaneEndpoint is the global endpoint - for the cluster, which then loadbalances the requests to each - individual API server. This configuration object lets you customize - what IP/DNS name and port the local API server advertises it's - accessible on. By default, kubeadm tries to auto-detect the - IP of the default interface and use that, but in case that process + description: |- + LocalAPIEndpoint represents the endpoint of the API server instance that's deployed on this control plane node + In HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint in the sense that ControlPlaneEndpoint + is the global endpoint for the cluster, which then loadbalances the requests to each individual API server. This + configuration object lets you customize what IP/DNS name and port the local API server advertises it's accessible + on. 
By default, kubeadm tries to auto-detect the IP of the default interface and use that, but in case that process fails you may set the desired value here. properties: advertiseAddress: @@ -2578,16 +2642,17 @@ spec: API server to advertise. type: string bindPort: - description: BindPort sets the secure port for the API Server - to bind to. Defaults to 6443. + description: |- + BindPort sets the secure port for the API Server to bind to. + Defaults to 6443. format: int32 type: integer type: object nodeRegistration: - description: NodeRegistration holds fields that relate to registering - the new control-plane node to the cluster. When used in the - context of control plane nodes, NodeRegistration should remain - consistent across both InitConfiguration and JoinConfiguration + description: |- + NodeRegistration holds fields that relate to registering the new control-plane node to the cluster. + When used in the context of control plane nodes, NodeRegistration should remain consistent + across both InitConfiguration and JoinConfiguration properties: criSocket: description: CRISocket is used to retrieve container runtime @@ -2601,11 +2666,12 @@ spec: type: string type: array imagePullPolicy: - description: ImagePullPolicy specifies the policy for image - pulling during kubeadm "init" and "join" operations. The - value of this field must be one of "Always", "IfNotPresent" - or "Never". Defaults to "IfNotPresent". This can be used - only with Kubernetes version equal to 1.22 and later. + description: |- + ImagePullPolicy specifies the policy for image pulling + during kubeadm "init" and "join" operations. The value of + this field must be one of "Always", "IfNotPresent" or + "Never". Defaults to "IfNotPresent". This can be used only + with Kubernetes version equal to 1.22 and later. enum: - Always - IfNotPresent @@ -2614,47 +2680,41 @@ spec: kubeletExtraArgs: additionalProperties: type: string - description: KubeletExtraArgs passes through extra arguments - to the kubelet. The arguments here are passed to the kubelet - command line via the environment file kubeadm writes at - runtime for the kubelet to source. This overrides the generic - base-level configuration in the kubelet-config-1.X ConfigMap - Flags have higher priority when parsing. These values are - local and specific to the node kubeadm is executing on. + description: |- + KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file + kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap + Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. type: object name: - description: Name is the `.Metadata.Name` field of the Node - API object that will be created in this `kubeadm init` or - `kubeadm join` operation. This field is also used in the - CommonName field of the kubelet's client certificate to - the API server. Defaults to the hostname of the node if - not provided. + description: |- + Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. + This field is also used in the CommonName field of the kubelet's client certificate to the API server. + Defaults to the hostname of the node if not provided. type: string taints: - description: 'Taints specifies the taints the Node API object - should be registered with. 
If this field is unset, i.e. - nil, in the `kubeadm init` process it will be defaulted - to []v1.Taint{''node-role.kubernetes.io/master=""''}. If - you don''t want to taint your control-plane node, set this - field to an empty slice, i.e. `taints: []` in the YAML file. - This field is solely used for Node registration.' + description: |- + Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process + it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your control-plane node, set this field to an + empty slice, i.e. `taints: []` in the YAML file. This field is solely used for Node registration. items: - description: The node this Taint is attached to has the - "effect" on any pod that does not tolerate the Taint. + description: |- + The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. properties: effect: - description: Required. The effect of the taint on pods - that do not tolerate the taint. Valid effects are - NoSchedule, PreferNoSchedule and NoExecute. + description: |- + Required. The effect of the taint on pods + that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. type: string key: description: Required. The taint key to be applied to a node. type: string timeAdded: - description: TimeAdded represents the time at which - the taint was added. It is only written for NoExecute - taints. + description: |- + TimeAdded represents the time at which the taint was added. + It is only written for NoExecute taints. format: date-time type: string value: @@ -2668,31 +2728,29 @@ spec: type: array type: object patches: - description: Patches contains options related to applying patches - to components deployed by kubeadm during "kubeadm init". The - minimum kubernetes version needed to support Patches is v1.22 + description: |- + Patches contains options related to applying patches to components deployed by kubeadm during + "kubeadm init". The minimum kubernetes version needed to support Patches is v1.22 properties: directory: - description: Directory is a path to a directory that contains - files named "target[suffix][+patchtype].extension". For - example, "kube-apiserver0+merge.yaml" or just "etcd.json". - "target" can be one of "kube-apiserver", "kube-controller-manager", - "kube-scheduler", "etcd". "patchtype" can be one of "strategic" - "merge" or "json" and they match the patch formats supported - by kubectl. The default "patchtype" is "strategic". "extension" - must be either "json" or "yaml". "suffix" is an optional - string that can be used to determine which patches are applied - first alpha-numerically. These files can be written into - the target directory via KubeadmConfig.Files which specifies - additional files to be created on the machine, either with - content inline or by referencing a secret. + description: |- + Directory is a path to a directory that contains files named "target[suffix][+patchtype].extension". + For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "target" can be one of + "kube-apiserver", "kube-controller-manager", "kube-scheduler", "etcd". "patchtype" can be one + of "strategic" "merge" or "json" and they match the patch formats supported by kubectl. + The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". 
+ "suffix" is an optional string that can be used to determine which patches are applied + first alpha-numerically. + These files can be written into the target directory via KubeadmConfig.Files which + specifies additional files to be created on the machine, either with content inline or + by referencing a secret. type: string type: object skipPhases: - description: SkipPhases is a list of phases to skip during command - execution. The list of phases can be obtained with the "kubeadm - init --help" command. This option takes effect only on Kubernetes - >=1.22.0. + description: |- + SkipPhases is a list of phases to skip during command execution. + The list of phases can be obtained with the "kubeadm init --help" command. + This option takes effect only on Kubernetes >=1.22.0. items: type: string type: array @@ -2702,21 +2760,23 @@ spec: join command properties: apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string caCertPath: - description: 'CACertPath is the path to the SSL certificate authority - used to secure comunications between node and control-plane. - Defaults to "/etc/kubernetes/pki/ca.crt". TODO: revisit when - there is defaulting from k/k' + description: |- + CACertPath is the path to the SSL certificate authority used to + secure comunications between node and control-plane. + Defaults to "/etc/kubernetes/pki/ca.crt". + TODO: revisit when there is defaulting from k/k type: string controlPlane: - description: ControlPlane defines the additional control plane - instance to be deployed on the joining node. If nil, no additional - control plane instance will be deployed. + description: |- + ControlPlane defines the additional control plane instance to be deployed on the joining node. + If nil, no additional control plane instance will be deployed. properties: localAPIEndpoint: description: LocalAPIEndpoint represents the endpoint of the @@ -2727,58 +2787,58 @@ spec: the API server to advertise. type: string bindPort: - description: BindPort sets the secure port for the API - Server to bind to. Defaults to 6443. + description: |- + BindPort sets the secure port for the API Server to bind to. + Defaults to 6443. 
format: int32 type: integer type: object type: object discovery: - description: 'Discovery specifies the options for the kubelet - to use during the TLS Bootstrap process TODO: revisit when there - is defaulting from k/k' + description: |- + Discovery specifies the options for the kubelet to use during the TLS Bootstrap process + TODO: revisit when there is defaulting from k/k properties: bootstrapToken: - description: BootstrapToken is used to set the options for - bootstrap token based discovery BootstrapToken and File - are mutually exclusive + description: |- + BootstrapToken is used to set the options for bootstrap token based discovery + BootstrapToken and File are mutually exclusive properties: apiServerEndpoint: description: APIServerEndpoint is an IP or domain name to the API server from which info will be fetched. type: string caCertHashes: - description: 'CACertHashes specifies a set of public key - pins to verify when token-based discovery is used. The - root CA found during discovery must match one of these - values. Specifying an empty set disables root CA pinning, - which can be unsafe. Each hash is specified as "<type>:<value>", - where the only currently supported type is "sha256". - This is a hex-encoded SHA-256 hash of the Subject Public - Key Info (SPKI) object in DER-encoded ASN.1. These hashes - can be calculated using, for example, OpenSSL: openssl - x509 -pubkey -in ca.crt openssl rsa -pubin -outform - der 2>&/dev/null | openssl dgst -sha256 -hex' + description: |- + CACertHashes specifies a set of public key pins to verify + when token-based discovery is used. The root CA found during discovery + must match one of these values. Specifying an empty set disables root CA + pinning, which can be unsafe. Each hash is specified as "<type>:<value>", + where the only currently supported type is "sha256". This is a hex-encoded + SHA-256 hash of the Subject Public Key Info (SPKI) object in DER-encoded + ASN.1. These hashes can be calculated using, for example, OpenSSL: + openssl x509 -pubkey -in ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex items: type: string type: array token: - description: Token is a token used to validate cluster - information fetched from the control-plane. + description: |- + Token is a token used to validate cluster information + fetched from the control-plane. type: string unsafeSkipCAVerification: - description: UnsafeSkipCAVerification allows token-based - discovery without CA verification via CACertHashes. - This can weaken the security of kubeadm since other - nodes can impersonate the control-plane. + description: |- + UnsafeSkipCAVerification allows token-based discovery + without CA verification via CACertHashes. This can weaken + the security of kubeadm since other nodes can impersonate the control-plane. type: boolean required: - token type: object file: - description: File is used to specify a file or URL to a kubeconfig - file from which to load cluster information BootstrapToken - and File are mutually exclusive + description: |- + File is used to specify a file or URL to a kubeconfig file from which to load cluster information + BootstrapToken and File are mutually exclusive properties: kubeConfigPath: description: KubeConfigPath is used to specify the actual @@ -2792,24 +2852,25 @@ spec: description: Timeout modifies the discovery timeout type: string tlsBootstrapToken: - description: TLSBootstrapToken is a token used for TLS bootstrapping.
- If .BootstrapToken is set, this field is defaulted to .BootstrapToken.Token, - but can be overridden. If .File is set, this field **must - be set** in case the KubeConfigFile does not contain any - other authentication information + description: |- + TLSBootstrapToken is a token used for TLS bootstrapping. + If .BootstrapToken is set, this field is defaulted to .BootstrapToken.Token, but can be overridden. + If .File is set, this field **must be set** in case the KubeConfigFile does not contain any other authentication information type: string type: object kind: - description: 'Kind is a string value representing the REST resource - this object represents. Servers may infer this from the endpoint - the client submits requests to. Cannot be updated. In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string nodeRegistration: - description: NodeRegistration holds fields that relate to registering - the new control-plane node to the cluster. When used in the - context of control plane nodes, NodeRegistration should remain - consistent across both InitConfiguration and JoinConfiguration + description: |- + NodeRegistration holds fields that relate to registering the new control-plane node to the cluster. + When used in the context of control plane nodes, NodeRegistration should remain consistent + across both InitConfiguration and JoinConfiguration properties: criSocket: description: CRISocket is used to retrieve container runtime @@ -2823,11 +2884,12 @@ spec: type: string type: array imagePullPolicy: - description: ImagePullPolicy specifies the policy for image - pulling during kubeadm "init" and "join" operations. The - value of this field must be one of "Always", "IfNotPresent" - or "Never". Defaults to "IfNotPresent". This can be used - only with Kubernetes version equal to 1.22 and later. + description: |- + ImagePullPolicy specifies the policy for image pulling + during kubeadm "init" and "join" operations. The value of + this field must be one of "Always", "IfNotPresent" or + "Never". Defaults to "IfNotPresent". This can be used only + with Kubernetes version equal to 1.22 and later. enum: - Always - IfNotPresent @@ -2836,47 +2898,41 @@ spec: kubeletExtraArgs: additionalProperties: type: string - description: KubeletExtraArgs passes through extra arguments - to the kubelet. The arguments here are passed to the kubelet - command line via the environment file kubeadm writes at - runtime for the kubelet to source. This overrides the generic - base-level configuration in the kubelet-config-1.X ConfigMap - Flags have higher priority when parsing. These values are - local and specific to the node kubeadm is executing on. + description: |- + KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file + kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap + Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. 
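# Illustrative sketch, not generated CRD content: token-based discovery with
# CA pinning, combining the BootstrapTokenDiscovery fields documented above.
# The endpoint, token, and sha256 value are placeholders, not real values.
joinConfiguration:
  discovery:
    bootstrapToken:
      apiServerEndpoint: 10.10.0.10:6443
      token: abcdef.0123456789abcdef
      caCertHashes:
        - "sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
      unsafeSkipCAVerification: false   # keep CA pinning enabled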
type: object name: - description: Name is the `.Metadata.Name` field of the Node - API object that will be created in this `kubeadm init` or - `kubeadm join` operation. This field is also used in the - CommonName field of the kubelet's client certificate to - the API server. Defaults to the hostname of the node if - not provided. + description: |- + Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. + This field is also used in the CommonName field of the kubelet's client certificate to the API server. + Defaults to the hostname of the node if not provided. type: string taints: - description: 'Taints specifies the taints the Node API object - should be registered with. If this field is unset, i.e. - nil, in the `kubeadm init` process it will be defaulted - to []v1.Taint{''node-role.kubernetes.io/master=""''}. If - you don''t want to taint your control-plane node, set this - field to an empty slice, i.e. `taints: []` in the YAML file. - This field is solely used for Node registration.' + description: |- + Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process + it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your control-plane node, set this field to an + empty slice, i.e. `taints: []` in the YAML file. This field is solely used for Node registration. items: - description: The node this Taint is attached to has the - "effect" on any pod that does not tolerate the Taint. + description: |- + The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. properties: effect: - description: Required. The effect of the taint on pods - that do not tolerate the taint. Valid effects are - NoSchedule, PreferNoSchedule and NoExecute. + description: |- + Required. The effect of the taint on pods + that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. type: string key: description: Required. The taint key to be applied to a node. type: string timeAdded: - description: TimeAdded represents the time at which - the taint was added. It is only written for NoExecute - taints. + description: |- + TimeAdded represents the time at which the taint was added. + It is only written for NoExecute taints. format: date-time type: string value: @@ -2890,31 +2946,29 @@ spec: type: array type: object patches: - description: Patches contains options related to applying patches - to components deployed by kubeadm during "kubeadm join". The - minimum kubernetes version needed to support Patches is v1.22 + description: |- + Patches contains options related to applying patches to components deployed by kubeadm during + "kubeadm join". The minimum kubernetes version needed to support Patches is v1.22 properties: directory: - description: Directory is a path to a directory that contains - files named "target[suffix][+patchtype].extension". For - example, "kube-apiserver0+merge.yaml" or just "etcd.json". - "target" can be one of "kube-apiserver", "kube-controller-manager", - "kube-scheduler", "etcd". "patchtype" can be one of "strategic" - "merge" or "json" and they match the patch formats supported - by kubectl. The default "patchtype" is "strategic". "extension" - must be either "json" or "yaml". "suffix" is an optional - string that can be used to determine which patches are applied - first alpha-numerically. 
These files can be written into - the target directory via KubeadmConfig.Files which specifies - additional files to be created on the machine, either with - content inline or by referencing a secret. + description: |- + Directory is a path to a directory that contains files named "target[suffix][+patchtype].extension". + For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "target" can be one of + "kube-apiserver", "kube-controller-manager", "kube-scheduler", "etcd". "patchtype" can be one + of "strategic" "merge" or "json" and they match the patch formats supported by kubectl. + The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". + "suffix" is an optional string that can be used to determine which patches are applied + first alpha-numerically. + These files can be written into the target directory via KubeadmConfig.Files which + specifies additional files to be created on the machine, either with content inline or + by referencing a secret. type: string type: object skipPhases: - description: SkipPhases is a list of phases to skip during command - execution. The list of phases can be obtained with the "kubeadm - init --help" command. This option takes effect only on Kubernetes - >=1.22.0. + description: |- + SkipPhases is a list of phases to skip during command execution. + The list of phases can be obtained with the "kubeadm init --help" command. + This option takes effect only on Kubernetes >=1.22.0. items: type: string type: array @@ -2952,16 +3006,24 @@ spec: type: string type: array useExperimentalRetryJoin: - description: "UseExperimentalRetryJoin replaces a basic kubeadm command - with a shell script with retries for joins. \n This is meant to - be an experimental temporary workaround on some environments where - joins fail due to timing (and other issues). The long term goal - is to add retries to kubeadm proper and use that functionality. - \n This will add about 40KB to userdata \n For more information, - refer to https://github.com/kubernetes-sigs/cluster-api/pull/2763#discussion_r397306055. - \n Deprecated: This experimental fix is no longer needed and this - field will be removed in a future release. When removing also remove - from staticcheck exclude-rules for SA1019 in golangci.yml" + description: |- + UseExperimentalRetryJoin replaces a basic kubeadm command with a shell + script with retries for joins. + + + This is meant to be an experimental temporary workaround on some environments + where joins fail due to timing (and other issues). The long term goal is to add retries to + kubeadm proper and use that functionality. + + + This will add about 40KB to userdata + + + For more information, refer to https://github.com/kubernetes-sigs/cluster-api/pull/2763#discussion_r397306055. + + + Deprecated: This experimental fix is no longer needed and this field will be removed in a future release. + When removing also remove from staticcheck exclude-rules for SA1019 in golangci.yml type: boolean users: description: Users specifies extra users to add @@ -3037,7 +3099,8 @@ spec: type: object type: array verbosity: - description: Verbosity is the number for the kubeadm log level verbosity. + description: |- + Verbosity is the number for the kubeadm log level verbosity. It overrides the `--v` flag in kubeadm commands. format: int32 type: integer @@ -3052,37 +3115,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. 
This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - lastTransitionTime diff --git a/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigtemplates.yaml b/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigtemplates.yaml index abad81720568..b9970098e05d 100644 --- a/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigtemplates.yaml +++ b/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigtemplates.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.15.0 name: kubeadmconfigtemplates.bootstrap.cluster.x-k8s.io spec: group: bootstrap.cluster.x-k8s.io @@ -17,21 +16,30 @@ spec: singular: kubeadmconfigtemplate scope: Namespaced versions: - - name: v1alpha3 + - deprecated: true + name: v1alpha3 schema: openAPIV3Schema: - description: "KubeadmConfigTemplate is the Schema for the kubeadmconfigtemplates - API. \n Deprecated: This type will be removed in one of the next releases." + description: |- + KubeadmConfigTemplate is the Schema for the kubeadmconfigtemplates API. + + + Deprecated: This type will be removed in one of the next releases. 
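To illustrate the "target[suffix][+patchtype].extension" naming convention from the patches stanza above, a hedged sketch follows: it ships a strategic patch for the kube-apiserver through the spec's files list and points patches.directory at it. The directory path and patch content are illustrative assumptions, not defaults from this schema:

    joinConfiguration:
      patches:
        directory: /etc/kubernetes/patches   # illustrative path
    files:
      - path: /etc/kubernetes/patches/kube-apiserver0+strategic.yaml
        owner: root:root
        permissions: "0600"
        content: |
          # strategic-merge patch applied to the kube-apiserver static pod
          spec:
            containers:
              - name: kube-apiserver
                resources:
                  requests:
                    cpu: 250m   # example value only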
properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -42,9 +50,9 @@ spec: description: KubeadmConfigTemplateResource defines the Template structure. properties: spec: - description: KubeadmConfigSpec defines the desired state of KubeadmConfig. - Either ClusterConfiguration and InitConfiguration should be - defined or the JoinConfiguration should be defined. + description: |- + KubeadmConfigSpec defines the desired state of KubeadmConfig. + Either ClusterConfiguration and InitConfiguration should be defined or the JoinConfiguration should be defined. properties: clusterConfiguration: description: ClusterConfiguration along with InitConfiguration @@ -63,21 +71,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags to - pass to the control plane component. TODO: This - is temporary and ideally we would like to switch - all components to use ComponentConfig + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements describing - volumes that are mounted from the host. + description: |- + HostPathMount contains elements describing volumes that are mounted from the + host. properties: hostPath: - description: HostPath is the path in the host - that will be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside the @@ -106,35 +116,34 @@ spec: type: string type: object apiVersion: - description: 'APIVersion defines the versioned schema - of this representation of an object. Servers should - convert recognized schemas to the latest internal value, - and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string certificatesDir: - description: 'CertificatesDir specifies where to store - or look for all required certificates. NB: if not provided, - this will default to `/etc/kubernetes/pki`' + description: |- + CertificatesDir specifies where to store or look for all required certificates. + NB: if not provided, this will default to `/etc/kubernetes/pki` type: string clusterName: description: The cluster name type: string controlPlaneEndpoint: - description: 'ControlPlaneEndpoint sets a stable IP address - or DNS name for the control plane; it can be a valid - IP address or a RFC-1123 DNS subdomain, both with optional - TCP port. In case the ControlPlaneEndpoint is not specified, - the AdvertiseAddress + BindPort are used; in case the - ControlPlaneEndpoint is specified but without a TCP - port, the BindPort is used. Possible usages are: e.g. - In a cluster with more than one control plane instances, - this field should be assigned the address of the external - load balancer in front of the control plane instances. - e.g. in environments with enforced node recycling, - the ControlPlaneEndpoint could be used for assigning - a stable DNS to the control plane. NB: This value defaults - to the first value in the Cluster object status.apiEndpoints - array.' + description: |- + ControlPlaneEndpoint sets a stable IP address or DNS name for the control plane; it + can be a valid IP address or a RFC-1123 DNS subdomain, both with optional TCP port. + In case the ControlPlaneEndpoint is not specified, the AdvertiseAddress + BindPort + are used; in case the ControlPlaneEndpoint is specified but without a TCP port, + the BindPort is used. + Possible usages are: + e.g. In a cluster with more than one control plane instances, this field should be + assigned the address of the external load balancer in front of the + control plane instances. + e.g. in environments with enforced node recycling, the ControlPlaneEndpoint + could be used for assigning a stable DNS to the control plane. + NB: This value defaults to the first value in the Cluster object status.apiEndpoints array. type: string controllerManager: description: ControllerManager contains extra settings @@ -143,21 +152,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags to - pass to the control plane component. TODO: This - is temporary and ideally we would like to switch - all components to use ComponentConfig + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements describing - volumes that are mounted from the host. + description: |- + HostPathMount contains elements describing volumes that are mounted from the + host. properties: hostPath: - description: HostPath is the path in the host - that will be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. 
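As a minimal sketch of the HostPathMount fields described above, an extra volume mounted into the kube-apiserver might look like the following; the volume name and paths are illustrative assumptions:

    apiServer:
      extraVolumes:
        - name: audit-policy                  # name of the volume
          hostPath: /etc/kubernetes/audit     # path on the host that gets mounted
          mountPath: /etc/kubernetes/audit    # path inside the static pod
          readOnly: true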
type: string mountPath: description: MountPath is the path inside the @@ -186,38 +197,38 @@ spec: installed in the cluster. properties: imageRepository: - description: ImageRepository sets the container registry - to pull images from. if not set, the ImageRepository - defined in ClusterConfiguration will be used instead. + description: |- + ImageRepository sets the container registry to pull images from. + if not set, the ImageRepository defined in ClusterConfiguration will be used instead. type: string imageTag: - description: ImageTag allows to specify a tag for - the image. In case this value is set, kubeadm does - not change automatically the version of the above - components during upgrades. + description: |- + ImageTag allows to specify a tag for the image. + In case this value is set, kubeadm does not change automatically the version of the above components during upgrades. type: string type: description: Type defines the DNS add-on to be used type: string type: object etcd: - description: 'Etcd holds configuration for etcd. NB: This - value defaults to a Local (stacked) etcd' + description: |- + Etcd holds configuration for etcd. + NB: This value defaults to a Local (stacked) etcd properties: external: - description: External describes how to connect to - an external etcd cluster Local and External are - mutually exclusive + description: |- + External describes how to connect to an external etcd cluster + Local and External are mutually exclusive properties: caFile: - description: CAFile is an SSL Certificate Authority - file used to secure etcd communication. Required - if using a TLS connection. + description: |- + CAFile is an SSL Certificate Authority file used to secure etcd communication. + Required if using a TLS connection. type: string certFile: - description: CertFile is an SSL certification - file used to secure etcd communication. Required - if using a TLS connection. + description: |- + CertFile is an SSL certification file used to secure etcd communication. + Required if using a TLS connection. type: string endpoints: description: Endpoints of etcd members. Required @@ -226,9 +237,9 @@ spec: type: string type: array keyFile: - description: KeyFile is an SSL key file used to - secure etcd communication. Required if using - a TLS connection. + description: |- + KeyFile is an SSL key file used to secure etcd communication. + Required if using a TLS connection. type: string required: - caFile @@ -237,32 +248,31 @@ spec: - keyFile type: object local: - description: Local provides configuration knobs for - configuring the local etcd instance Local and External - are mutually exclusive + description: |- + Local provides configuration knobs for configuring the local etcd instance + Local and External are mutually exclusive properties: dataDir: - description: DataDir is the directory etcd will - place its data. Defaults to "/var/lib/etcd". + description: |- + DataDir is the directory etcd will place its data. + Defaults to "/var/lib/etcd". type: string extraArgs: additionalProperties: type: string - description: ExtraArgs are extra arguments provided - to the etcd binary when run inside a static - pod. + description: |- + ExtraArgs are extra arguments provided to the etcd binary + when run inside a static pod. type: object imageRepository: - description: ImageRepository sets the container - registry to pull images from. if not set, the - ImageRepository defined in ClusterConfiguration - will be used instead. 
+ description: |- + ImageRepository sets the container registry to pull images from. + if not set, the ImageRepository defined in ClusterConfiguration will be used instead. type: string imageTag: - description: ImageTag allows to specify a tag - for the image. In case this value is set, kubeadm - does not change automatically the version of - the above components during upgrades. + description: |- + ImageTag allows to specify a tag for the image. + In case this value is set, kubeadm does not change automatically the version of the above components during upgrades. type: string peerCertSANs: description: PeerCertSANs sets extra Subject Alternative @@ -285,46 +295,45 @@ spec: description: FeatureGates enabled by the user. type: object imageRepository: - description: ImageRepository sets the container registry - to pull images from. If empty, `k8s.gcr.io` will be - used by default; in case of kubernetes version is a - CI build (kubernetes version starts with `ci/` or `ci-cross/`) - `gcr.io/k8s-staging-ci-images` will be used as a default - for control plane components and for kube-proxy, while - `k8s.gcr.io` will be used for all the other images. + description: |- + ImageRepository sets the container registry to pull images from. + If empty, `k8s.gcr.io` will be used by default; in case of kubernetes version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`) + `gcr.io/k8s-staging-ci-images` will be used as a default for control plane components and for kube-proxy, while `k8s.gcr.io` + will be used for all the other images. type: string kind: - description: 'Kind is a string value representing the - REST resource this object represents. Servers may infer - this from the endpoint the client submits requests to. - Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string kubernetesVersion: - description: 'KubernetesVersion is the target version - of the control plane. NB: This value defaults to the - Machine object spec.version' + description: |- + KubernetesVersion is the target version of the control plane. + NB: This value defaults to the Machine object spec.version type: string networking: - description: 'Networking holds configuration for the networking - topology of the cluster. NB: This value defaults to - the Cluster object spec.clusterNetwork.' + description: |- + Networking holds configuration for the networking topology of the cluster. + NB: This value defaults to the Cluster object spec.clusterNetwork. properties: dnsDomain: description: DNSDomain is the dns domain used by k8s services. Defaults to "cluster.local". type: string podSubnet: - description: PodSubnet is the subnet used by pods. - If unset, the API server will not allocate CIDR - ranges for every node. Defaults to a comma-delimited - string of the Cluster object's spec.clusterNetwork.services.cidrBlocks - if that is set + description: |- + PodSubnet is the subnet used by pods. + If unset, the API server will not allocate CIDR ranges for every node. 
+ Defaults to a comma-delimited string of the Cluster object's spec.clusterNetwork.services.cidrBlocks if that is set type: string serviceSubnet: - description: ServiceSubnet is the subnet used by k8s - services. Defaults to a comma-delimited string of - the Cluster object's spec.clusterNetwork.pods.cidrBlocks, - or to "10.96.0.0/12" if that's unset. + description: |- + ServiceSubnet is the subnet used by k8s services. + Defaults to a comma-delimited string of the Cluster object's spec.clusterNetwork.pods.cidrBlocks, or + to "10.96.0.0/12" if that's unset. type: string type: object scheduler: @@ -334,21 +343,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags to - pass to the control plane component. TODO: This - is temporary and ideally we would like to switch - all components to use ComponentConfig + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements describing - volumes that are mounted from the host. + description: |- + HostPathMount contains elements describing volumes that are mounted from the + host. properties: hostPath: - description: HostPath is the path in the host - that will be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside the @@ -407,10 +418,9 @@ spec: to be used. If set to None, no label is used. type: string overwrite: - description: Overwrite defines whether or not to - overwrite any existing filesystem. If true, any - pre-existing file system will be destroyed. Use - with Caution. + description: |- + Overwrite defines whether or not to overwrite any existing filesystem. + If true, any pre-existing file system will be destroyed. Use with Caution. type: boolean partition: description: 'Partition specifies the partition @@ -419,11 +429,9 @@ spec: partition number.' type: string replaceFS: - description: 'ReplaceFS is a special directive, - used for Microsoft Azure that instructs cloud-init - to replace a file system of . NOTE: unless - you define a label, this requires the use of the - ''any'' partition directive.' + description: |- + ReplaceFS is a special directive, used for Microsoft Azure that instructs cloud-init to replace a file system of . + NOTE: unless you define a label, this requires the use of the 'any' partition directive. type: string required: - device @@ -442,22 +450,21 @@ spec: description: Device is the name of the device. type: string layout: - description: Layout specifies the device layout. - If it is true, a single partition will be created - for the entire device. When layout is false, it - means don't partition or ignore existing partitioning. + description: |- + Layout specifies the device layout. + If it is true, a single partition will be created for the entire device. + When layout is false, it means don't partition or ignore existing partitioning. type: boolean overwrite: - description: Overwrite describes whether to skip - checks and create the partition if a partition - or filesystem is found on the device. Use with - caution. Default is 'false'. 
+ description: |- + Overwrite describes whether to skip checks and create the partition if a partition or filesystem is found on the device. + Use with caution. Default is 'false'. type: boolean tableType: - description: 'TableType specifies the tupe of partition - table. The following are supported: ''mbr'': default - and setups a MS-DOS partition table ''gpt'': setups - a GPT partition table' + description: |- + TableType specifies the type of partition table. The following are supported: + 'mbr': default and sets up a MS-DOS partition table + 'gpt': sets up a GPT partition table type: string required: - device @@ -533,54 +540,52 @@ spec: are the configurations necessary for the init command properties: apiVersion: - description: 'APIVersion defines the versioned schema - of this representation of an object. Servers should - convert recognized schemas to the latest internal value, - and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string bootstrapTokens: - description: BootstrapTokens is respected at `kubeadm init` time and describes a set of Bootstrap Tokens to - create. This information IS NOT uploaded to the kubeadm - cluster configmap, partly because of its sensitive nature + description: |- + BootstrapTokens is respected at `kubeadm init` time and describes a set of Bootstrap Tokens to create. + This information IS NOT uploaded to the kubeadm cluster configmap, partly because of its sensitive nature items: description: BootstrapToken describes one bootstrap token, stored as a Secret in the cluster. properties: description: description: Description sets a human-friendly message - why this token exists and what it's used for, - so other administrators can know its purpose. + description: |- + Description sets a human-friendly message why this token exists and what it's used + for, so other administrators can know its purpose. type: string expires: - description: Expires specifies the timestamp when - this token expires. Defaults to being set dynamically - at runtime based on the TTL. Expires and TTL are - mutually exclusive. + description: |- + Expires specifies the timestamp when this token expires. Defaults to being set + dynamically at runtime based on the TTL. Expires and TTL are mutually exclusive. format: date-time type: string groups: - description: Groups specifies the extra groups that - this token will authenticate as when/if used for - authentication + description: |- + Groups specifies the extra groups that this token will authenticate as when/if + used for authentication items: type: string type: array token: - description: Token is used for establishing bidirectional - trust between nodes and control-planes. Used for - joining nodes in the cluster. + description: |- + Token is used for establishing bidirectional trust between nodes and control-planes. + Used for joining nodes in the cluster. type: string ttl: - description: TTL defines the time to live for this - token. Defaults to 24h. Expires and TTL are mutually - exclusive. + description: |- + TTL defines the time to live for this token. Defaults to 24h. + Expires and TTL are mutually exclusive.
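Putting the bootstrap token fields above together, here is a hedged sketch of a single bootstrapTokens entry; the token is a placeholder in kubeadm's "abcdef.0123456789abcdef" format and every value is illustrative:

    initConfiguration:
      bootstrapTokens:
        - token: "abcdef.0123456789abcdef"   # placeholder, never commit a real token
          description: "example token for joining the first worker pool"
          ttl: "24h"                          # set ttl OR expires, never both
          usages:
            - signing
            - authentication
          groups:
            - system:bootstrappers:kubeadm:default-node-token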
type: string usages: - description: Usages describes the ways in which - this token can be used. Can by default be used - for establishing bidirectional trust, but that - can be changed here. + description: |- + Usages describes the ways in which this token can be used. Can by default be used + for establishing bidirectional trust, but that can be changed here. items: type: string type: array @@ -589,22 +594,20 @@ spec: type: object type: array kind: - description: 'Kind is a string value representing the - REST resource this object represents. Servers may infer - this from the endpoint the client submits requests to. - Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string localAPIEndpoint: - description: LocalAPIEndpoint represents the endpoint - of the API server instance that's deployed on this control - plane node In HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint - in the sense that ControlPlaneEndpoint is the global - endpoint for the cluster, which then loadbalances the - requests to each individual API server. This configuration - object lets you customize what IP/DNS name and port - the local API server advertises it's accessible on. - By default, kubeadm tries to auto-detect the IP of the - default interface and use that, but in case that process + description: |- + LocalAPIEndpoint represents the endpoint of the API server instance that's deployed on this control plane node + In HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint in the sense that ControlPlaneEndpoint + is the global endpoint for the cluster, which then loadbalances the requests to each individual API server. This + configuration object lets you customize what IP/DNS name and port the local API server advertises it's accessible + on. By default, kubeadm tries to auto-detect the IP of the default interface and use that, but in case that process fails you may set the desired value here. properties: advertiseAddress: @@ -612,8 +615,9 @@ spec: for the API server to advertise. type: string bindPort: - description: BindPort sets the secure port for the - API Server to bind to. Defaults to 6443. + description: |- + BindPort sets the secure port for the API Server to bind to. + Defaults to 6443. format: int32 type: integer required: @@ -621,11 +625,10 @@ spec: - bindPort type: object nodeRegistration: - description: NodeRegistration holds fields that relate - to registering the new control-plane node to the cluster. - When used in the context of control plane nodes, NodeRegistration - should remain consistent across both InitConfiguration - and JoinConfiguration + description: |- + NodeRegistration holds fields that relate to registering the new control-plane node to the cluster. + When used in the context of control plane nodes, NodeRegistration should remain consistent + across both InitConfiguration and JoinConfiguration properties: criSocket: description: CRISocket is used to retrieve container @@ -635,51 +638,41 @@ spec: kubeletExtraArgs: additionalProperties: type: string - description: KubeletExtraArgs passes through extra - arguments to the kubelet. 
The arguments here are - passed to the kubelet command line via the environment - file kubeadm writes at runtime for the kubelet to - source. This overrides the generic base-level configuration - in the kubelet-config-1.X ConfigMap Flags have higher - priority when parsing. These values are local and - specific to the node kubeadm is executing on. + description: |- + KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file + kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap + Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. type: object name: - description: Name is the `.Metadata.Name` field of - the Node API object that will be created in this - `kubeadm init` or `kubeadm join` operation. This - field is also used in the CommonName field of the - kubelet's client certificate to the API server. + description: |- + Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. + This field is also used in the CommonName field of the kubelet's client certificate to the API server. Defaults to the hostname of the node if not provided. type: string taints: - description: 'Taints specifies the taints the Node - API object should be registered with. If this field - is unset, i.e. nil, in the `kubeadm init` process - it will be defaulted to []v1.Taint{''node-role.kubernetes.io/master=""''}. - If you don''t want to taint your control-plane node, - set this field to an empty slice, i.e. `taints: - {}` in the YAML file. This field is solely used - for Node registration.' + description: |- + Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process + it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your control-plane node, set this field to an + empty slice, i.e. `taints: []` in the YAML file. This field is solely used for Node registration. items: - description: The node this Taint is attached to - has the "effect" on any pod that does not tolerate - the Taint. + description: |- + The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. properties: effect: - description: Required. The effect of the taint - on pods that do not tolerate the taint. Valid - effects are NoSchedule, PreferNoSchedule and - NoExecute. + description: |- + Required. The effect of the taint on pods + that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. type: string key: description: Required. The taint key to be applied to a node. type: string timeAdded: - description: TimeAdded represents the time at - which the taint was added. It is only written - for NoExecute taints. + description: |- + TimeAdded represents the time at which the taint was added. + It is only written for NoExecute taints. format: date-time type: string value: @@ -698,21 +691,23 @@ spec: for the join command properties: apiVersion: - description: 'APIVersion defines the versioned schema - of this representation of an object. Servers should - convert recognized schemas to the latest internal value, - and may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string caCertPath: - description: 'CACertPath is the path to the SSL certificate - authority used to secure comunications between node - and control-plane. Defaults to "/etc/kubernetes/pki/ca.crt". - TODO: revisit when there is defaulting from k/k' + description: |- + CACertPath is the path to the SSL certificate authority used to + secure communications between node and control-plane. + Defaults to "/etc/kubernetes/pki/ca.crt". + TODO: revisit when there is defaulting from k/k type: string controlPlane: - description: ControlPlane defines the additional control - plane instance to be deployed on the joining node. If - nil, no additional control plane instance will be deployed. + description: |- + ControlPlane defines the additional control plane instance to be deployed on the joining node. + If nil, no additional control plane instance will be deployed. properties: localAPIEndpoint: description: LocalAPIEndpoint represents the endpoint @@ -724,8 +719,9 @@ spec: for the API server to advertise. type: string bindPort: - description: BindPort sets the secure port for - the API Server to bind to. Defaults to 6443. + description: |- + BindPort sets the secure port for the API Server to bind to. + Defaults to 6443. format: int32 type: integer required: @@ -734,14 +730,14 @@ spec: type: object type: object discovery: - description: 'Discovery specifies the options for the - kubelet to use during the TLS Bootstrap process TODO: - revisit when there is defaulting from k/k' + description: |- + Discovery specifies the options for the kubelet to use during the TLS Bootstrap process + TODO: revisit when there is defaulting from k/k properties: bootstrapToken: - description: BootstrapToken is used to set the options - for bootstrap token based discovery BootstrapToken - and File are mutually exclusive + description: |- + BootstrapToken is used to set the options for bootstrap token based discovery + BootstrapToken and File are mutually exclusive properties: apiServerEndpoint: description: APIServerEndpoint is an IP or domain @@ -749,41 +745,37 @@ spec: be fetched. type: string caCertHashes: - description: 'CACertHashes specifies a set of - public key pins to verify when token-based discovery - is used. The root CA found during discovery - must match one of these values. Specifying an - empty set disables root CA pinning, which can - be unsafe. Each hash is specified as ":", - where the only currently supported type is "sha256". - This is a hex-encoded SHA-256 hash of the Subject - Public Key Info (SPKI) object in DER-encoded - ASN.1. These hashes can be calculated using, - for example, OpenSSL: openssl x509 -pubkey -in - ca.crt openssl rsa -pubin -outform der 2>&/dev/null - | openssl dgst -sha256 -hex' + description: |- + CACertHashes specifies a set of public key pins to verify + when token-based discovery is used. The root CA found during discovery + must match one of these values. Specifying an empty set disables root CA + pinning, which can be unsafe. Each hash is specified as ":", + where the only currently supported type is "sha256".
This is a hex-encoded + SHA-256 hash of the Subject Public Key Info (SPKI) object in DER-encoded + ASN.1. These hashes can be calculated using, for example, OpenSSL: + openssl x509 -pubkey -in ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex items: type: string type: array token: - description: Token is a token used to validate - cluster information fetched from the control-plane. + description: |- + Token is a token used to validate cluster information + fetched from the control-plane. type: string unsafeSkipCAVerification: - description: UnsafeSkipCAVerification allows token-based - discovery without CA verification via CACertHashes. - This can weaken the security of kubeadm since - other nodes can impersonate the control-plane. + description: |- + UnsafeSkipCAVerification allows token-based discovery + without CA verification via CACertHashes. This can weaken + the security of kubeadm since other nodes can impersonate the control-plane. type: boolean required: - token - unsafeSkipCAVerification type: object file: - description: File is used to specify a file or URL - to a kubeconfig file from which to load cluster - information BootstrapToken and File are mutually - exclusive + description: |- + File is used to specify a file or URL to a kubeconfig file from which to load cluster information + BootstrapToken and File are mutually exclusive properties: kubeConfigPath: description: KubeConfigPath is used to specify @@ -797,27 +789,26 @@ spec: description: Timeout modifies the discovery timeout type: string tlsBootstrapToken: - description: 'TLSBootstrapToken is a token used for - TLS bootstrapping. If .BootstrapToken is set, this - field is defaulted to .BootstrapToken.Token, but - can be overridden. If .File is set, this field **must - be set** in case the KubeConfigFile does not contain - any other authentication information TODO: revisit - when there is defaulting from k/k' + description: |- + TLSBootstrapToken is a token used for TLS bootstrapping. + If .BootstrapToken is set, this field is defaulted to .BootstrapToken.Token, but can be overridden. + If .File is set, this field **must be set** in case the KubeConfigFile does not contain any other authentication information + TODO: revisit when there is defaulting from k/k type: string type: object kind: - description: 'Kind is a string value representing the - REST resource this object represents. Servers may infer - this from the endpoint the client submits requests to. - Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string nodeRegistration: - description: NodeRegistration holds fields that relate - to registering the new control-plane node to the cluster. - When used in the context of control plane nodes, NodeRegistration - should remain consistent across both InitConfiguration - and JoinConfiguration + description: |- + NodeRegistration holds fields that relate to registering the new control-plane node to the cluster.
+ When used in the context of control plane nodes, NodeRegistration should remain consistent + across both InitConfiguration and JoinConfiguration properties: criSocket: description: CRISocket is used to retrieve container @@ -827,51 +818,41 @@ spec: kubeletExtraArgs: additionalProperties: type: string - description: KubeletExtraArgs passes through extra - arguments to the kubelet. The arguments here are - passed to the kubelet command line via the environment - file kubeadm writes at runtime for the kubelet to - source. This overrides the generic base-level configuration - in the kubelet-config-1.X ConfigMap Flags have higher - priority when parsing. These values are local and - specific to the node kubeadm is executing on. + description: |- + KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file + kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap + Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. type: object name: - description: Name is the `.Metadata.Name` field of - the Node API object that will be created in this - `kubeadm init` or `kubeadm join` operation. This - field is also used in the CommonName field of the - kubelet's client certificate to the API server. + description: |- + Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. + This field is also used in the CommonName field of the kubelet's client certificate to the API server. Defaults to the hostname of the node if not provided. type: string taints: - description: 'Taints specifies the taints the Node - API object should be registered with. If this field - is unset, i.e. nil, in the `kubeadm init` process - it will be defaulted to []v1.Taint{''node-role.kubernetes.io/master=""''}. - If you don''t want to taint your control-plane node, - set this field to an empty slice, i.e. `taints: - {}` in the YAML file. This field is solely used - for Node registration.' + description: |- + Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process + it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your control-plane node, set this field to an + empty slice, i.e. `taints: []` in the YAML file. This field is solely used for Node registration. items: - description: The node this Taint is attached to - has the "effect" on any pod that does not tolerate - the Taint. + description: |- + The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. properties: effect: - description: Required. The effect of the taint - on pods that do not tolerate the taint. Valid - effects are NoSchedule, PreferNoSchedule and - NoExecute. + description: |- + Required. The effect of the taint on pods + that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. type: string key: description: Required. The taint key to be applied to a node. type: string timeAdded: - description: TimeAdded represents the time at - which the taint was added. It is only written - for NoExecute taints. + description: |- + TimeAdded represents the time at which the taint was added. + It is only written for NoExecute taints.
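Tying the discovery fields above together, here is a minimal sketch of a token-based discovery block for a JoinConfiguration; the endpoint, token and hash are placeholders, not values from this schema:

    joinConfiguration:
      discovery:
        bootstrapToken:
          apiServerEndpoint: "10.0.0.10:6443"        # illustrative control-plane endpoint
          token: "abcdef.0123456789abcdef"           # placeholder token
          caCertHashes:
            # sha256 over the CA certificate's Subject Public Key Info, e.g. via:
            #   openssl x509 -pubkey -in ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex
            - "sha256:<hex-encoded-SPKI-hash>"       # placeholder value
          unsafeSkipCAVerification: false            # keep root CA pinning enabled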
format: date-time type: string value: @@ -920,13 +901,20 @@ spec: type: string type: array useExperimentalRetryJoin: - description: "UseExperimentalRetryJoin replaces a basic kubeadm - command with a shell script with retries for joins. \n This - is meant to be an experimental temporary workaround on some - environments where joins fail due to timing (and other issues). - The long term goal is to add retries to kubeadm proper and - use that functionality. \n This will add about 40KB to userdata - \n For more information, refer to https://github.com/kubernetes-sigs/cluster-api/pull/2763#discussion_r397306055." + description: |- + UseExperimentalRetryJoin replaces a basic kubeadm command with a shell + script with retries for joins. + + + This is meant to be an experimental temporary workaround on some environments + where joins fail due to timing (and other issues). The long term goal is to add retries to + kubeadm proper and use that functionality. + + + This will add about 40KB to userdata + + + For more information, refer to https://github.com/kubernetes-sigs/cluster-api/pull/2763#discussion_r397306055. type: boolean users: description: Users specifies extra users to add @@ -982,8 +970,9 @@ spec: type: object type: array verbosity: - description: Verbosity is the number for the kubeadm log level - verbosity. It overrides the `--v` flag in kubeadm commands. + description: |- + Verbosity is the number for the kubeadm log level verbosity. + It overrides the `--v` flag in kubeadm commands. format: int32 type: integer type: object @@ -992,28 +981,37 @@ spec: - template type: object type: object - served: true + served: false storage: false - additionalPrinterColumns: - description: Time duration since creation of KubeadmConfigTemplate jsonPath: .metadata.creationTimestamp name: Age type: date + deprecated: true name: v1alpha4 schema: openAPIV3Schema: - description: "KubeadmConfigTemplate is the Schema for the kubeadmconfigtemplates - API. \n Deprecated: This type will be removed in one of the next releases." + description: |- + KubeadmConfigTemplate is the Schema for the kubeadmconfigtemplates API. + + + Deprecated: This type will be removed in one of the next releases. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -1024,9 +1022,9 @@ spec: description: KubeadmConfigTemplateResource defines the Template structure. properties: spec: - description: KubeadmConfigSpec defines the desired state of KubeadmConfig. - Either ClusterConfiguration and InitConfiguration should be - defined or the JoinConfiguration should be defined. + description: |- + KubeadmConfigSpec defines the desired state of KubeadmConfig. + Either ClusterConfiguration and InitConfiguration should be defined or the JoinConfiguration should be defined. properties: clusterConfiguration: description: ClusterConfiguration along with InitConfiguration @@ -1045,21 +1043,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags to - pass to the control plane component. TODO: This - is temporary and ideally we would like to switch - all components to use ComponentConfig + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements describing - volumes that are mounted from the host. + description: |- + HostPathMount contains elements describing volumes that are mounted from the + host. properties: hostPath: - description: HostPath is the path in the host - that will be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside the @@ -1088,35 +1088,34 @@ spec: type: string type: object apiVersion: - description: 'APIVersion defines the versioned schema - of this representation of an object. Servers should - convert recognized schemas to the latest internal value, - and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string certificatesDir: - description: 'CertificatesDir specifies where to store - or look for all required certificates. NB: if not provided, - this will default to `/etc/kubernetes/pki`' + description: |- + CertificatesDir specifies where to store or look for all required certificates. + NB: if not provided, this will default to `/etc/kubernetes/pki` type: string clusterName: description: The cluster name type: string controlPlaneEndpoint: - description: 'ControlPlaneEndpoint sets a stable IP address - or DNS name for the control plane; it can be a valid - IP address or a RFC-1123 DNS subdomain, both with optional - TCP port. In case the ControlPlaneEndpoint is not specified, - the AdvertiseAddress + BindPort are used; in case the - ControlPlaneEndpoint is specified but without a TCP - port, the BindPort is used. Possible usages are: e.g. 
- In a cluster with more than one control plane instances, - this field should be assigned the address of the external - load balancer in front of the control plane instances. - e.g. in environments with enforced node recycling, - the ControlPlaneEndpoint could be used for assigning - a stable DNS to the control plane. NB: This value defaults - to the first value in the Cluster object status.apiEndpoints - array.' + description: |- + ControlPlaneEndpoint sets a stable IP address or DNS name for the control plane; it + can be a valid IP address or a RFC-1123 DNS subdomain, both with optional TCP port. + In case the ControlPlaneEndpoint is not specified, the AdvertiseAddress + BindPort + are used; in case the ControlPlaneEndpoint is specified but without a TCP port, + the BindPort is used. + Possible usages are: + e.g. In a cluster with more than one control plane instances, this field should be + assigned the address of the external load balancer in front of the + control plane instances. + e.g. in environments with enforced node recycling, the ControlPlaneEndpoint + could be used for assigning a stable DNS to the control plane. + NB: This value defaults to the first value in the Cluster object status.apiEndpoints array. type: string controllerManager: description: ControllerManager contains extra settings @@ -1125,21 +1124,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags to - pass to the control plane component. TODO: This - is temporary and ideally we would like to switch - all components to use ComponentConfig + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements describing - volumes that are mounted from the host. + description: |- + HostPathMount contains elements describing volumes that are mounted from the + host. properties: hostPath: - description: HostPath is the path in the host - that will be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside the @@ -1168,35 +1169,35 @@ spec: installed in the cluster. properties: imageRepository: - description: ImageRepository sets the container registry - to pull images from. if not set, the ImageRepository - defined in ClusterConfiguration will be used instead. + description: |- + ImageRepository sets the container registry to pull images from. + if not set, the ImageRepository defined in ClusterConfiguration will be used instead. type: string imageTag: - description: ImageTag allows to specify a tag for - the image. In case this value is set, kubeadm does - not change automatically the version of the above - components during upgrades. + description: |- + ImageTag allows to specify a tag for the image. + In case this value is set, kubeadm does not change automatically the version of the above components during upgrades. type: string type: object etcd: - description: 'Etcd holds configuration for etcd. NB: This - value defaults to a Local (stacked) etcd' + description: |- + Etcd holds configuration for etcd. 
+ NB: This value defaults to a Local (stacked) etcd properties: external: - description: External describes how to connect to - an external etcd cluster Local and External are - mutually exclusive + description: |- + External describes how to connect to an external etcd cluster + Local and External are mutually exclusive properties: caFile: - description: CAFile is an SSL Certificate Authority - file used to secure etcd communication. Required - if using a TLS connection. + description: |- + CAFile is an SSL Certificate Authority file used to secure etcd communication. + Required if using a TLS connection. type: string certFile: - description: CertFile is an SSL certification - file used to secure etcd communication. Required - if using a TLS connection. + description: |- + CertFile is an SSL certification file used to secure etcd communication. + Required if using a TLS connection. type: string endpoints: description: Endpoints of etcd members. Required @@ -1205,9 +1206,9 @@ spec: type: string type: array keyFile: - description: KeyFile is an SSL key file used to - secure etcd communication. Required if using - a TLS connection. + description: |- + KeyFile is an SSL key file used to secure etcd communication. + Required if using a TLS connection. type: string required: - caFile @@ -1216,32 +1217,31 @@ spec: - keyFile type: object local: - description: Local provides configuration knobs for - configuring the local etcd instance Local and External - are mutually exclusive + description: |- + Local provides configuration knobs for configuring the local etcd instance + Local and External are mutually exclusive properties: dataDir: - description: DataDir is the directory etcd will - place its data. Defaults to "/var/lib/etcd". + description: |- + DataDir is the directory etcd will place its data. + Defaults to "/var/lib/etcd". type: string extraArgs: additionalProperties: type: string - description: ExtraArgs are extra arguments provided - to the etcd binary when run inside a static - pod. + description: |- + ExtraArgs are extra arguments provided to the etcd binary + when run inside a static pod. type: object imageRepository: - description: ImageRepository sets the container - registry to pull images from. if not set, the - ImageRepository defined in ClusterConfiguration - will be used instead. + description: |- + ImageRepository sets the container registry to pull images from. + if not set, the ImageRepository defined in ClusterConfiguration will be used instead. type: string imageTag: - description: ImageTag allows to specify a tag - for the image. In case this value is set, kubeadm - does not change automatically the version of - the above components during upgrades. + description: |- + ImageTag allows to specify a tag for the image. + In case this value is set, kubeadm does not change automatically the version of the above components during upgrades. type: string peerCertSANs: description: PeerCertSANs sets extra Subject Alternative @@ -1264,47 +1264,45 @@ spec: description: FeatureGates enabled by the user. type: object imageRepository: - description: ImageRepository sets the container registry - to pull images from. If empty, `registry.k8s.io` will - be used by default; in case of kubernetes version is - a CI build (kubernetes version starts with `ci/` or - `ci-cross/`) `gcr.io/k8s-staging-ci-images` will be - used as a default for control plane components and for - kube-proxy, while `registry.k8s.io` will be used for - all the other images. 
+ description: |- + ImageRepository sets the container registry to pull images from. + If empty, `registry.k8s.io` will be used by default; in case of kubernetes version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`) + `gcr.io/k8s-staging-ci-images` will be used as a default for control plane components and for kube-proxy, while `registry.k8s.io` + will be used for all the other images. type: string kind: - description: 'Kind is a string value representing the - REST resource this object represents. Servers may infer - this from the endpoint the client submits requests to. - Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string kubernetesVersion: - description: 'KubernetesVersion is the target version - of the control plane. NB: This value defaults to the - Machine object spec.version' + description: |- + KubernetesVersion is the target version of the control plane. + NB: This value defaults to the Machine object spec.version type: string networking: - description: 'Networking holds configuration for the networking - topology of the cluster. NB: This value defaults to - the Cluster object spec.clusterNetwork.' + description: |- + Networking holds configuration for the networking topology of the cluster. + NB: This value defaults to the Cluster object spec.clusterNetwork. properties: dnsDomain: description: DNSDomain is the dns domain used by k8s services. Defaults to "cluster.local". type: string podSubnet: - description: PodSubnet is the subnet used by pods. - If unset, the API server will not allocate CIDR - ranges for every node. Defaults to a comma-delimited - string of the Cluster object's spec.clusterNetwork.services.cidrBlocks - if that is set + description: |- + PodSubnet is the subnet used by pods. + If unset, the API server will not allocate CIDR ranges for every node. + Defaults to a comma-delimited string of the Cluster object's spec.clusterNetwork.services.cidrBlocks if that is set type: string serviceSubnet: - description: ServiceSubnet is the subnet used by k8s - services. Defaults to a comma-delimited string of - the Cluster object's spec.clusterNetwork.pods.cidrBlocks, - or to "10.96.0.0/12" if that's unset. + description: |- + ServiceSubnet is the subnet used by k8s services. + Defaults to a comma-delimited string of the Cluster object's spec.clusterNetwork.pods.cidrBlocks, or + to "10.96.0.0/12" if that's unset. type: string type: object scheduler: @@ -1314,21 +1312,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags to - pass to the control plane component. TODO: This - is temporary and ideally we would like to switch - all components to use ComponentConfig + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. 
items: - description: HostPathMount contains elements describing - volumes that are mounted from the host. + description: |- + HostPathMount contains elements describing volumes that are mounted from the + host. properties: hostPath: - description: HostPath is the path in the host - that will be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside the @@ -1382,10 +1382,9 @@ spec: to be used. If set to None, no label is used. type: string overwrite: - description: Overwrite defines whether or not to - overwrite any existing filesystem. If true, any - pre-existing file system will be destroyed. Use - with Caution. + description: |- + Overwrite defines whether or not to overwrite any existing filesystem. + If true, any pre-existing file system will be destroyed. Use with Caution. type: boolean partition: description: 'Partition specifies the partition @@ -1394,11 +1393,9 @@ spec: partition number.' type: string replaceFS: - description: 'ReplaceFS is a special directive, - used for Microsoft Azure that instructs cloud-init - to replace a file system of . NOTE: unless - you define a label, this requires the use of the - ''any'' partition directive.' + description: |- + ReplaceFS is a special directive, used for Microsoft Azure that instructs cloud-init to replace a file system of . + NOTE: unless you define a label, this requires the use of the 'any' partition directive. type: string required: - device @@ -1417,22 +1414,21 @@ spec: description: Device is the name of the device. type: string layout: - description: Layout specifies the device layout. - If it is true, a single partition will be created - for the entire device. When layout is false, it - means don't partition or ignore existing partitioning. + description: |- + Layout specifies the device layout. + If it is true, a single partition will be created for the entire device. + When layout is false, it means don't partition or ignore existing partitioning. type: boolean overwrite: - description: Overwrite describes whether to skip - checks and create the partition if a partition - or filesystem is found on the device. Use with - caution. Default is 'false'. + description: |- + Overwrite describes whether to skip checks and create the partition if a partition or filesystem is found on the device. + Use with caution. Default is 'false'. type: boolean tableType: - description: 'TableType specifies the tupe of partition - table. The following are supported: ''mbr'': default - and setups a MS-DOS partition table ''gpt'': setups - a GPT partition table' + description: |- + TableType specifies the type of partition table. The following are supported: + 'mbr': default and setups a MS-DOS partition table + 'gpt': setups a GPT partition table type: string required: - device @@ -1508,54 +1504,52 @@ spec: are the configurations necessary for the init command properties: apiVersion: - description: 'APIVersion defines the versioned schema - of this representation of an object. Servers should - convert recognized schemas to the latest internal value, - and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string bootstrapTokens: - description: BootstrapTokens is respected at `kubeadm - init` time and describes a set of Bootstrap Tokens to - create. This information IS NOT uploaded to the kubeadm - cluster configmap, partly because of its sensitive nature + description: |- + BootstrapTokens is respected at `kubeadm init` time and describes a set of Bootstrap Tokens to create. + This information IS NOT uploaded to the kubeadm cluster configmap, partly because of its sensitive nature items: description: BootstrapToken describes one bootstrap token, stored as a Secret in the cluster. properties: description: - description: Description sets a human-friendly message - why this token exists and what it's used for, - so other administrators can know its purpose. + description: |- + Description sets a human-friendly message why this token exists and what it's used + for, so other administrators can know its purpose. type: string expires: - description: Expires specifies the timestamp when - this token expires. Defaults to being set dynamically - at runtime based on the TTL. Expires and TTL are - mutually exclusive. + description: |- + Expires specifies the timestamp when this token expires. Defaults to being set + dynamically at runtime based on the TTL. Expires and TTL are mutually exclusive. format: date-time type: string groups: - description: Groups specifies the extra groups that - this token will authenticate as when/if used for - authentication + description: |- + Groups specifies the extra groups that this token will authenticate as when/if + used for authentication items: type: string type: array token: - description: Token is used for establishing bidirectional - trust between nodes and control-planes. Used for - joining nodes in the cluster. + description: |- + Token is used for establishing bidirectional trust between nodes and control-planes. + Used for joining nodes in the cluster. type: string ttl: - description: TTL defines the time to live for this - token. Defaults to 24h. Expires and TTL are mutually - exclusive. + description: |- + TTL defines the time to live for this token. Defaults to 24h. + Expires and TTL are mutually exclusive. type: string usages: - description: Usages describes the ways in which - this token can be used. Can by default be used - for establishing bidirectional trust, but that - can be changed here. + description: |- + Usages describes the ways in which this token can be used. Can by default be used + for establishing bidirectional trust, but that can be changed here. items: type: string type: array @@ -1564,22 +1558,20 @@ spec: type: object type: array kind: - description: 'Kind is a string value representing the - REST resource this object represents. Servers may infer - this from the endpoint the client submits requests to. - Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string localAPIEndpoint: - description: LocalAPIEndpoint represents the endpoint - of the API server instance that's deployed on this control - plane node In HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint - in the sense that ControlPlaneEndpoint is the global - endpoint for the cluster, which then loadbalances the - requests to each individual API server. This configuration - object lets you customize what IP/DNS name and port - the local API server advertises it's accessible on. - By default, kubeadm tries to auto-detect the IP of the - default interface and use that, but in case that process + description: |- + LocalAPIEndpoint represents the endpoint of the API server instance that's deployed on this control plane node + In HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint in the sense that ControlPlaneEndpoint + is the global endpoint for the cluster, which then loadbalances the requests to each individual API server. This + configuration object lets you customize what IP/DNS name and port the local API server advertises it's accessible + on. By default, kubeadm tries to auto-detect the IP of the default interface and use that, but in case that process fails you may set the desired value here. properties: advertiseAddress: @@ -1587,17 +1579,17 @@ spec: for the API server to advertise. type: string bindPort: - description: BindPort sets the secure port for the - API Server to bind to. Defaults to 6443. + description: |- + BindPort sets the secure port for the API Server to bind to. + Defaults to 6443. format: int32 type: integer type: object nodeRegistration: - description: NodeRegistration holds fields that relate - to registering the new control-plane node to the cluster. - When used in the context of control plane nodes, NodeRegistration - should remain consistent across both InitConfiguration - and JoinConfiguration + description: |- + NodeRegistration holds fields that relate to registering the new control-plane node to the cluster. + When used in the context of control plane nodes, NodeRegistration should remain consistent + across both InitConfiguration and JoinConfiguration properties: criSocket: description: CRISocket is used to retrieve container @@ -1614,51 +1606,41 @@ spec: kubeletExtraArgs: additionalProperties: type: string - description: KubeletExtraArgs passes through extra - arguments to the kubelet. The arguments here are - passed to the kubelet command line via the environment - file kubeadm writes at runtime for the kubelet to - source. This overrides the generic base-level configuration - in the kubelet-config-1.X ConfigMap Flags have higher - priority when parsing. These values are local and - specific to the node kubeadm is executing on. + description: |- + KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file + kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap + Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. type: object name: - description: Name is the `.Metadata.Name` field of - the Node API object that will be created in this - `kubeadm init` or `kubeadm join` operation. 
This - field is also used in the CommonName field of the - kubelet's client certificate to the API server. + description: |- + Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. + This field is also used in the CommonName field of the kubelet's client certificate to the API server. Defaults to the hostname of the node if not provided. type: string taints: - description: 'Taints specifies the taints the Node - API object should be registered with. If this field - is unset, i.e. nil, in the `kubeadm init` process - it will be defaulted to []v1.Taint{''node-role.kubernetes.io/master=""''}. - If you don''t want to taint your control-plane node, - set this field to an empty slice, i.e. `taints: - {}` in the YAML file. This field is solely used - for Node registration.' + description: |- + Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process + it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your control-plane node, set this field to an + empty slice, i.e. `taints: {}` in the YAML file. This field is solely used for Node registration. items: - description: The node this Taint is attached to - has the "effect" on any pod that does not tolerate - the Taint. + description: |- + The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. properties: effect: - description: Required. The effect of the taint - on pods that do not tolerate the taint. Valid - effects are NoSchedule, PreferNoSchedule and - NoExecute. + description: |- + Required. The effect of the taint on pods + that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. type: string key: description: Required. The taint key to be applied to a node. type: string timeAdded: - description: TimeAdded represents the time at - which the taint was added. It is only written - for NoExecute taints. + description: |- + TimeAdded represents the time at which the taint was added. + It is only written for NoExecute taints. format: date-time type: string value: @@ -1677,21 +1659,23 @@ spec: for the join command properties: apiVersion: - description: 'APIVersion defines the versioned schema - of this representation of an object. Servers should - convert recognized schemas to the latest internal value, - and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string caCertPath: - description: 'CACertPath is the path to the SSL certificate - authority used to secure comunications between node - and control-plane. Defaults to "/etc/kubernetes/pki/ca.crt". - TODO: revisit when there is defaulting from k/k' + description: |- + CACertPath is the path to the SSL certificate authority used to + secure communications between node and control-plane. + Defaults to "/etc/kubernetes/pki/ca.crt".
+ TODO: revisit when there is defaulting from k/k type: string controlPlane: - description: ControlPlane defines the additional control - plane instance to be deployed on the joining node. If - nil, no additional control plane instance will be deployed. + description: |- + ControlPlane defines the additional control plane instance to be deployed on the joining node. + If nil, no additional control plane instance will be deployed. properties: localAPIEndpoint: description: LocalAPIEndpoint represents the endpoint @@ -1703,21 +1687,22 @@ spec: for the API server to advertise. type: string bindPort: - description: BindPort sets the secure port for - the API Server to bind to. Defaults to 6443. + description: |- + BindPort sets the secure port for the API Server to bind to. + Defaults to 6443. format: int32 type: integer type: object type: object discovery: - description: 'Discovery specifies the options for the - kubelet to use during the TLS Bootstrap process TODO: - revisit when there is defaulting from k/k' + description: |- + Discovery specifies the options for the kubelet to use during the TLS Bootstrap process + TODO: revisit when there is defaulting from k/k properties: bootstrapToken: - description: BootstrapToken is used to set the options - for bootstrap token based discovery BootstrapToken - and File are mutually exclusive + description: |- + BootstrapToken is used to set the options for bootstrap token based discovery + BootstrapToken and File are mutually exclusive properties: apiServerEndpoint: description: APIServerEndpoint is an IP or domain @@ -1725,40 +1710,36 @@ spec: be fetched. type: string caCertHashes: - description: 'CACertHashes specifies a set of - public key pins to verify when token-based discovery - is used. The root CA found during discovery - must match one of these values. Specifying an - empty set disables root CA pinning, which can - be unsafe. Each hash is specified as ":", - where the only currently supported type is "sha256". - This is a hex-encoded SHA-256 hash of the Subject - Public Key Info (SPKI) object in DER-encoded - ASN.1. These hashes can be calculated using, - for example, OpenSSL: openssl x509 -pubkey -in - ca.crt openssl rsa -pubin -outform der 2>&/dev/null - | openssl dgst -sha256 -hex' + description: |- + CACertHashes specifies a set of public key pins to verify + when token-based discovery is used. The root CA found during discovery + must match one of these values. Specifying an empty set disables root CA + pinning, which can be unsafe. Each hash is specified as ":", + where the only currently supported type is "sha256". This is a hex-encoded + SHA-256 hash of the Subject Public Key Info (SPKI) object in DER-encoded + ASN.1. These hashes can be calculated using, for example, OpenSSL: + openssl x509 -pubkey -in ca.crt openssl rsa -pubin -outform der 2>&/dev/null | openssl dgst -sha256 -hex items: type: string type: array token: - description: Token is a token used to validate - cluster information fetched from the control-plane. + description: |- + Token is a token used to validate cluster information + fetched from the control-plane. type: string unsafeSkipCAVerification: - description: UnsafeSkipCAVerification allows token-based - discovery without CA verification via CACertHashes. - This can weaken the security of kubeadm since - other nodes can impersonate the control-plane. + description: |- + UnsafeSkipCAVerification allows token-based discovery + without CA verification via CACertHashes. 
This can weaken + the security of kubeadm since other nodes can impersonate the control-plane. type: boolean required: - token type: object file: - description: File is used to specify a file or URL - to a kubeconfig file from which to load cluster - information BootstrapToken and File are mutually - exclusive + description: |- + File is used to specify a file or URL to a kubeconfig file from which to load cluster information + BootstrapToken and File are mutually exclusive properties: kubeConfigPath: description: KubeConfigPath is used to specify @@ -1772,26 +1753,25 @@ spec: description: Timeout modifies the discovery timeout type: string tlsBootstrapToken: - description: TLSBootstrapToken is a token used for - TLS bootstrapping. If .BootstrapToken is set, this - field is defaulted to .BootstrapToken.Token, but - can be overridden. If .File is set, this field **must - be set** in case the KubeConfigFile does not contain - any other authentication information + description: |- + TLSBootstrapToken is a token used for TLS bootstrapping. + If .BootstrapToken is set, this field is defaulted to .BootstrapToken.Token, but can be overridden. + If .File is set, this field **must be set** in case the KubeConfigFile does not contain any other authentication information type: string type: object kind: - description: 'Kind is a string value representing the - REST resource this object represents. Servers may infer - this from the endpoint the client submits requests to. - Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string nodeRegistration: - description: NodeRegistration holds fields that relate - to registering the new control-plane node to the cluster. - When used in the context of control plane nodes, NodeRegistration - should remain consistent across both InitConfiguration - and JoinConfiguration + description: |- + NodeRegistration holds fields that relate to registering the new control-plane node to the cluster. + When used in the context of control plane nodes, NodeRegistration should remain consistent + across both InitConfiguration and JoinConfiguration properties: criSocket: description: CRISocket is used to retrieve container @@ -1808,51 +1788,41 @@ spec: kubeletExtraArgs: additionalProperties: type: string - description: KubeletExtraArgs passes through extra - arguments to the kubelet. The arguments here are - passed to the kubelet command line via the environment - file kubeadm writes at runtime for the kubelet to - source. This overrides the generic base-level configuration - in the kubelet-config-1.X ConfigMap Flags have higher - priority when parsing. These values are local and - specific to the node kubeadm is executing on. + description: |- + KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file + kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap + Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. 
type: object name: - description: Name is the `.Metadata.Name` field of - the Node API object that will be created in this - `kubeadm init` or `kubeadm join` operation. This - field is also used in the CommonName field of the - kubelet's client certificate to the API server. + description: |- + Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. + This field is also used in the CommonName field of the kubelet's client certificate to the API server. Defaults to the hostname of the node if not provided. type: string taints: - description: 'Taints specifies the taints the Node - API object should be registered with. If this field - is unset, i.e. nil, in the `kubeadm init` process - it will be defaulted to []v1.Taint{''node-role.kubernetes.io/master=""''}. - If you don''t want to taint your control-plane node, - set this field to an empty slice, i.e. `taints: - {}` in the YAML file. This field is solely used - for Node registration.' + description: |- + Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process + it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your control-plane node, set this field to an + empty slice, i.e. `taints: {}` in the YAML file. This field is solely used for Node registration. items: - description: The node this Taint is attached to - has the "effect" on any pod that does not tolerate - the Taint. + description: |- + The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. properties: effect: - description: Required. The effect of the taint - on pods that do not tolerate the taint. Valid - effects are NoSchedule, PreferNoSchedule and - NoExecute. + description: |- + Required. The effect of the taint on pods + that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. type: string key: description: Required. The taint key to be applied to a node. type: string timeAdded: - description: TimeAdded represents the time at - which the taint was added. It is only written - for NoExecute taints. + description: |- + TimeAdded represents the time at which the taint was added. + It is only written for NoExecute taints. format: date-time type: string value: @@ -1901,13 +1871,20 @@ spec: type: string type: array useExperimentalRetryJoin: - description: "UseExperimentalRetryJoin replaces a basic kubeadm - command with a shell script with retries for joins. \n This - is meant to be an experimental temporary workaround on some - environments where joins fail due to timing (and other issues). - The long term goal is to add retries to kubeadm proper and - use that functionality. \n This will add about 40KB to userdata - \n For more information, refer to https://github.com/kubernetes-sigs/cluster-api/pull/2763#discussion_r397306055." + description: |- + UseExperimentalRetryJoin replaces a basic kubeadm command with a shell + script with retries for joins. + + + This is meant to be an experimental temporary workaround on some environments + where joins fail due to timing (and other issues). The long term goal is to add retries to + kubeadm proper and use that functionality. + + + This will add about 40KB to userdata + + + For more information, refer to https://github.com/kubernetes-sigs/cluster-api/pull/2763#discussion_r397306055. 
type: boolean users: description: Users specifies extra users to add @@ -1963,8 +1940,9 @@ spec: type: object type: array verbosity: - description: Verbosity is the number for the kubeadm log level - verbosity. It overrides the `--v` flag in kubeadm commands. + description: |- + Verbosity is the number for the kubeadm log level verbosity. + It overrides the `--v` flag in kubeadm commands. format: int32 type: integer type: object @@ -1973,7 +1951,7 @@ spec: - template type: object type: object - served: true + served: false storage: false subresources: {} - additionalPrinterColumns: @@ -1988,14 +1966,19 @@ spec: API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -2006,30 +1989,33 @@ spec: description: KubeadmConfigTemplateResource defines the Template structure. properties: metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key value map - stored with a resource that may be set by external tools - to store and retrieve arbitrary metadata. They are not queryable - and should be preserved when modifying objects. More info: - http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations type: object labels: additionalProperties: type: string - description: 'Map of string keys and values that can be used - to organize and categorize (scope and select) objects. May - match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. 
+ More info: http://kubernetes.io/docs/user-guide/labels type: object type: object spec: - description: KubeadmConfigSpec defines the desired state of KubeadmConfig. - Either ClusterConfiguration and InitConfiguration should be - defined or the JoinConfiguration should be defined. + description: |- + KubeadmConfigSpec defines the desired state of KubeadmConfig. + Either ClusterConfiguration and InitConfiguration should be defined or the JoinConfiguration should be defined. properties: clusterConfiguration: description: ClusterConfiguration along with InitConfiguration @@ -2048,21 +2034,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags to - pass to the control plane component. TODO: This - is temporary and ideally we would like to switch - all components to use ComponentConfig + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements describing - volumes that are mounted from the host. + description: |- + HostPathMount contains elements describing volumes that are mounted from the + host. properties: hostPath: - description: HostPath is the path in the host - that will be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside the @@ -2091,35 +2079,34 @@ spec: type: string type: object apiVersion: - description: 'APIVersion defines the versioned schema - of this representation of an object. Servers should - convert recognized schemas to the latest internal value, - and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string certificatesDir: - description: 'CertificatesDir specifies where to store - or look for all required certificates. NB: if not provided, - this will default to `/etc/kubernetes/pki`' + description: |- + CertificatesDir specifies where to store or look for all required certificates. + NB: if not provided, this will default to `/etc/kubernetes/pki` type: string clusterName: description: The cluster name type: string controlPlaneEndpoint: - description: 'ControlPlaneEndpoint sets a stable IP address - or DNS name for the control plane; it can be a valid - IP address or a RFC-1123 DNS subdomain, both with optional - TCP port. In case the ControlPlaneEndpoint is not specified, - the AdvertiseAddress + BindPort are used; in case the - ControlPlaneEndpoint is specified but without a TCP - port, the BindPort is used. Possible usages are: e.g. - In a cluster with more than one control plane instances, - this field should be assigned the address of the external - load balancer in front of the control plane instances. - e.g. 
in environments with enforced node recycling, - the ControlPlaneEndpoint could be used for assigning - a stable DNS to the control plane. NB: This value defaults - to the first value in the Cluster object status.apiEndpoints - array.' + description: |- + ControlPlaneEndpoint sets a stable IP address or DNS name for the control plane; it + can be a valid IP address or a RFC-1123 DNS subdomain, both with optional TCP port. + In case the ControlPlaneEndpoint is not specified, the AdvertiseAddress + BindPort + are used; in case the ControlPlaneEndpoint is specified but without a TCP port, + the BindPort is used. + Possible usages are: + e.g. In a cluster with more than one control plane instances, this field should be + assigned the address of the external load balancer in front of the + control plane instances. + e.g. in environments with enforced node recycling, the ControlPlaneEndpoint + could be used for assigning a stable DNS to the control plane. + NB: This value defaults to the first value in the Cluster object status.apiEndpoints array. type: string controllerManager: description: ControllerManager contains extra settings @@ -2128,21 +2115,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags to - pass to the control plane component. TODO: This - is temporary and ideally we would like to switch - all components to use ComponentConfig + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements describing - volumes that are mounted from the host. + description: |- + HostPathMount contains elements describing volumes that are mounted from the + host. properties: hostPath: - description: HostPath is the path in the host - that will be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside the @@ -2171,35 +2160,35 @@ spec: installed in the cluster. properties: imageRepository: - description: ImageRepository sets the container registry - to pull images from. if not set, the ImageRepository - defined in ClusterConfiguration will be used instead. + description: |- + ImageRepository sets the container registry to pull images from. + if not set, the ImageRepository defined in ClusterConfiguration will be used instead. type: string imageTag: - description: ImageTag allows to specify a tag for - the image. In case this value is set, kubeadm does - not change automatically the version of the above - components during upgrades. + description: |- + ImageTag allows to specify a tag for the image. + In case this value is set, kubeadm does not change automatically the version of the above components during upgrades. type: string type: object etcd: - description: 'Etcd holds configuration for etcd. NB: This - value defaults to a Local (stacked) etcd' + description: |- + Etcd holds configuration for etcd. 
+ NB: This value defaults to a Local (stacked) etcd properties: external: - description: External describes how to connect to - an external etcd cluster Local and External are - mutually exclusive + description: |- + External describes how to connect to an external etcd cluster + Local and External are mutually exclusive properties: caFile: - description: CAFile is an SSL Certificate Authority - file used to secure etcd communication. Required - if using a TLS connection. + description: |- + CAFile is an SSL Certificate Authority file used to secure etcd communication. + Required if using a TLS connection. type: string certFile: - description: CertFile is an SSL certification - file used to secure etcd communication. Required - if using a TLS connection. + description: |- + CertFile is an SSL certification file used to secure etcd communication. + Required if using a TLS connection. type: string endpoints: description: Endpoints of etcd members. Required @@ -2208,9 +2197,9 @@ spec: type: string type: array keyFile: - description: KeyFile is an SSL key file used to - secure etcd communication. Required if using - a TLS connection. + description: |- + KeyFile is an SSL key file used to secure etcd communication. + Required if using a TLS connection. type: string required: - caFile @@ -2219,32 +2208,31 @@ spec: - keyFile type: object local: - description: Local provides configuration knobs for - configuring the local etcd instance Local and External - are mutually exclusive + description: |- + Local provides configuration knobs for configuring the local etcd instance + Local and External are mutually exclusive properties: dataDir: - description: DataDir is the directory etcd will - place its data. Defaults to "/var/lib/etcd". + description: |- + DataDir is the directory etcd will place its data. + Defaults to "/var/lib/etcd". type: string extraArgs: additionalProperties: type: string - description: ExtraArgs are extra arguments provided - to the etcd binary when run inside a static - pod. + description: |- + ExtraArgs are extra arguments provided to the etcd binary + when run inside a static pod. type: object imageRepository: - description: ImageRepository sets the container - registry to pull images from. if not set, the - ImageRepository defined in ClusterConfiguration - will be used instead. + description: |- + ImageRepository sets the container registry to pull images from. + if not set, the ImageRepository defined in ClusterConfiguration will be used instead. type: string imageTag: - description: ImageTag allows to specify a tag - for the image. In case this value is set, kubeadm - does not change automatically the version of - the above components during upgrades. + description: |- + ImageTag allows to specify a tag for the image. + In case this value is set, kubeadm does not change automatically the version of the above components during upgrades. type: string peerCertSANs: description: PeerCertSANs sets extra Subject Alternative @@ -2267,54 +2255,52 @@ spec: description: FeatureGates enabled by the user. type: object imageRepository: - description: 'ImageRepository sets the container registry - to pull images from. * If not set, the default registry - of kubeadm will be used, i.e. * registry.k8s.io (new - registry): >= v1.22.17, >= v1.23.15, >= v1.24.9, >= - v1.25.0 * k8s.gcr.io (old registry): all older versions - Please note that when imageRepository is not set we - don''t allow upgrades to versions >= v1.22.0 which use - the old registry (k8s.gcr.io). 
Please use a newer patch - version with the new registry instead (i.e. >= v1.22.17, - >= v1.23.15, >= v1.24.9, >= v1.25.0). * If the version - is a CI build (kubernetes version starts with `ci/` - or `ci-cross/`) `gcr.io/k8s-staging-ci-images` will - be used as a default for control plane components and - for kube-proxy, while `registry.k8s.io` will be used - for all the other images.' + description: |- + ImageRepository sets the container registry to pull images from. + * If not set, the default registry of kubeadm will be used, i.e. + * registry.k8s.io (new registry): >= v1.22.17, >= v1.23.15, >= v1.24.9, >= v1.25.0 + * k8s.gcr.io (old registry): all older versions + Please note that when imageRepository is not set we don't allow upgrades to + versions >= v1.22.0 which use the old registry (k8s.gcr.io). Please use + a newer patch version with the new registry instead (i.e. >= v1.22.17, + >= v1.23.15, >= v1.24.9, >= v1.25.0). + * If the version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`) + `gcr.io/k8s-staging-ci-images` will be used as a default for control plane components + and for kube-proxy, while `registry.k8s.io` will be used for all the other images. type: string kind: - description: 'Kind is a string value representing the - REST resource this object represents. Servers may infer - this from the endpoint the client submits requests to. - Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string kubernetesVersion: - description: 'KubernetesVersion is the target version - of the control plane. NB: This value defaults to the - Machine object spec.version' + description: |- + KubernetesVersion is the target version of the control plane. + NB: This value defaults to the Machine object spec.version type: string networking: - description: 'Networking holds configuration for the networking - topology of the cluster. NB: This value defaults to - the Cluster object spec.clusterNetwork.' + description: |- + Networking holds configuration for the networking topology of the cluster. + NB: This value defaults to the Cluster object spec.clusterNetwork. properties: dnsDomain: description: DNSDomain is the dns domain used by k8s services. Defaults to "cluster.local". type: string podSubnet: - description: PodSubnet is the subnet used by pods. - If unset, the API server will not allocate CIDR - ranges for every node. Defaults to a comma-delimited - string of the Cluster object's spec.clusterNetwork.services.cidrBlocks - if that is set + description: |- + PodSubnet is the subnet used by pods. + If unset, the API server will not allocate CIDR ranges for every node. + Defaults to a comma-delimited string of the Cluster object's spec.clusterNetwork.services.cidrBlocks if that is set type: string serviceSubnet: - description: ServiceSubnet is the subnet used by k8s - services. Defaults to a comma-delimited string of - the Cluster object's spec.clusterNetwork.pods.cidrBlocks, - or to "10.96.0.0/12" if that's unset. + description: |- + ServiceSubnet is the subnet used by k8s services. 
+ Defaults to a comma-delimited string of the Cluster object's spec.clusterNetwork.pods.cidrBlocks, or + to "10.96.0.0/12" if that's unset. type: string type: object scheduler: @@ -2324,21 +2310,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags to - pass to the control plane component. TODO: This - is temporary and ideally we would like to switch - all components to use ComponentConfig + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements describing - volumes that are mounted from the host. + description: |- + HostPathMount contains elements describing volumes that are mounted from the + host. properties: hostPath: - description: HostPath is the path in the host - that will be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside the @@ -2392,10 +2380,9 @@ spec: to be used. If set to None, no label is used. type: string overwrite: - description: Overwrite defines whether or not to - overwrite any existing filesystem. If true, any - pre-existing file system will be destroyed. Use - with Caution. + description: |- + Overwrite defines whether or not to overwrite any existing filesystem. + If true, any pre-existing file system will be destroyed. Use with Caution. type: boolean partition: description: 'Partition specifies the partition @@ -2404,11 +2391,9 @@ spec: partition number.' type: string replaceFS: - description: 'ReplaceFS is a special directive, - used for Microsoft Azure that instructs cloud-init - to replace a file system of . NOTE: unless - you define a label, this requires the use of the - ''any'' partition directive.' + description: |- + ReplaceFS is a special directive, used for Microsoft Azure that instructs cloud-init to replace a file system of . + NOTE: unless you define a label, this requires the use of the 'any' partition directive. type: string required: - device @@ -2427,22 +2412,21 @@ spec: description: Device is the name of the device. type: string layout: - description: Layout specifies the device layout. - If it is true, a single partition will be created - for the entire device. When layout is false, it - means don't partition or ignore existing partitioning. + description: |- + Layout specifies the device layout. + If it is true, a single partition will be created for the entire device. + When layout is false, it means don't partition or ignore existing partitioning. type: boolean overwrite: - description: Overwrite describes whether to skip - checks and create the partition if a partition - or filesystem is found on the device. Use with - caution. Default is 'false'. + description: |- + Overwrite describes whether to skip checks and create the partition if a partition or filesystem is found on the device. + Use with caution. Default is 'false'. type: boolean tableType: - description: 'TableType specifies the tupe of partition - table. The following are supported: ''mbr'': default - and setups a MS-DOS partition table ''gpt'': setups - a GPT partition table' + description: |- + TableType specifies the type of partition table.
The following are supported: + 'mbr': default and setups a MS-DOS partition table + 'gpt': setups a GPT partition table type: string required: - device @@ -2526,11 +2510,12 @@ spec: configuration. properties: additionalConfig: - description: "AdditionalConfig contains additional - configuration to be merged with the Ignition configuration - generated by the bootstrapper controller. More info: - https://coreos.github.io/ignition/operator-notes/#config-merging - \n The data format is documented here: https://kinvolk.io/docs/flatcar-container-linux/latest/provisioning/cl-config/" + description: |- + AdditionalConfig contains additional configuration to be merged with the Ignition + configuration generated by the bootstrapper controller. More info: https://coreos.github.io/ignition/operator-notes/#config-merging + + + The data format is documented here: https://kinvolk.io/docs/flatcar-container-linux/latest/provisioning/cl-config/ type: string strict: description: Strict controls if AdditionalConfig should @@ -2544,54 +2529,52 @@ spec: are the configurations necessary for the init command properties: apiVersion: - description: 'APIVersion defines the versioned schema - of this representation of an object. Servers should - convert recognized schemas to the latest internal value, - and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string bootstrapTokens: - description: BootstrapTokens is respected at `kubeadm - init` time and describes a set of Bootstrap Tokens to - create. This information IS NOT uploaded to the kubeadm - cluster configmap, partly because of its sensitive nature + description: |- + BootstrapTokens is respected at `kubeadm init` time and describes a set of Bootstrap Tokens to create. + This information IS NOT uploaded to the kubeadm cluster configmap, partly because of its sensitive nature items: description: BootstrapToken describes one bootstrap token, stored as a Secret in the cluster. properties: description: - description: Description sets a human-friendly message - why this token exists and what it's used for, - so other administrators can know its purpose. + description: |- + Description sets a human-friendly message why this token exists and what it's used + for, so other administrators can know its purpose. type: string expires: - description: Expires specifies the timestamp when - this token expires. Defaults to being set dynamically - at runtime based on the TTL. Expires and TTL are - mutually exclusive. + description: |- + Expires specifies the timestamp when this token expires. Defaults to being set + dynamically at runtime based on the TTL. Expires and TTL are mutually exclusive. format: date-time type: string groups: - description: Groups specifies the extra groups that - this token will authenticate as when/if used for - authentication + description: |- + Groups specifies the extra groups that this token will authenticate as when/if + used for authentication items: type: string type: array token: - description: Token is used for establishing bidirectional - trust between nodes and control-planes. Used for - joining nodes in the cluster. 
+ description: |- + Token is used for establishing bidirectional trust between nodes and control-planes. + Used for joining nodes in the cluster. type: string ttl: - description: TTL defines the time to live for this - token. Defaults to 24h. Expires and TTL are mutually - exclusive. + description: |- + TTL defines the time to live for this token. Defaults to 24h. + Expires and TTL are mutually exclusive. type: string usages: - description: Usages describes the ways in which - this token can be used. Can by default be used - for establishing bidirectional trust, but that - can be changed here. + description: |- + Usages describes the ways in which this token can be used. Can by default be used + for establishing bidirectional trust, but that can be changed here. items: type: string type: array @@ -2600,22 +2583,20 @@ spec: type: object type: array kind: - description: 'Kind is a string value representing the - REST resource this object represents. Servers may infer - this from the endpoint the client submits requests to. - Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string localAPIEndpoint: - description: LocalAPIEndpoint represents the endpoint - of the API server instance that's deployed on this control - plane node In HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint - in the sense that ControlPlaneEndpoint is the global - endpoint for the cluster, which then loadbalances the - requests to each individual API server. This configuration - object lets you customize what IP/DNS name and port - the local API server advertises it's accessible on. - By default, kubeadm tries to auto-detect the IP of the - default interface and use that, but in case that process + description: |- + LocalAPIEndpoint represents the endpoint of the API server instance that's deployed on this control plane node + In HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint in the sense that ControlPlaneEndpoint + is the global endpoint for the cluster, which then loadbalances the requests to each individual API server. This + configuration object lets you customize what IP/DNS name and port the local API server advertises it's accessible + on. By default, kubeadm tries to auto-detect the IP of the default interface and use that, but in case that process fails you may set the desired value here. properties: advertiseAddress: @@ -2623,17 +2604,17 @@ spec: for the API server to advertise. type: string bindPort: - description: BindPort sets the secure port for the - API Server to bind to. Defaults to 6443. + description: |- + BindPort sets the secure port for the API Server to bind to. + Defaults to 6443. format: int32 type: integer type: object nodeRegistration: - description: NodeRegistration holds fields that relate - to registering the new control-plane node to the cluster. - When used in the context of control plane nodes, NodeRegistration - should remain consistent across both InitConfiguration - and JoinConfiguration + description: |- + NodeRegistration holds fields that relate to registering the new control-plane node to the cluster. 
+ When used in the context of control plane nodes, NodeRegistration should remain consistent + across both InitConfiguration and JoinConfiguration properties: criSocket: description: CRISocket is used to retrieve container @@ -2648,12 +2629,12 @@ spec: type: string type: array imagePullPolicy: - description: ImagePullPolicy specifies the policy - for image pulling during kubeadm "init" and "join" - operations. The value of this field must be one - of "Always", "IfNotPresent" or "Never". Defaults - to "IfNotPresent". This can be used only with Kubernetes - version equal to 1.22 and later. + description: |- + ImagePullPolicy specifies the policy for image pulling + during kubeadm "init" and "join" operations. The value of + this field must be one of "Always", "IfNotPresent" or + "Never". Defaults to "IfNotPresent". This can be used only + with Kubernetes version equal to 1.22 and later. enum: - Always - IfNotPresent @@ -2662,51 +2643,41 @@ spec: kubeletExtraArgs: additionalProperties: type: string - description: KubeletExtraArgs passes through extra - arguments to the kubelet. The arguments here are - passed to the kubelet command line via the environment - file kubeadm writes at runtime for the kubelet to - source. This overrides the generic base-level configuration - in the kubelet-config-1.X ConfigMap Flags have higher - priority when parsing. These values are local and - specific to the node kubeadm is executing on. + description: |- + KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file + kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap + Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. type: object name: - description: Name is the `.Metadata.Name` field of - the Node API object that will be created in this - `kubeadm init` or `kubeadm join` operation. This - field is also used in the CommonName field of the - kubelet's client certificate to the API server. + description: |- + Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. + This field is also used in the CommonName field of the kubelet's client certificate to the API server. Defaults to the hostname of the node if not provided. type: string taints: - description: 'Taints specifies the taints the Node - API object should be registered with. If this field - is unset, i.e. nil, in the `kubeadm init` process - it will be defaulted to []v1.Taint{''node-role.kubernetes.io/master=""''}. - If you don''t want to taint your control-plane node, - set this field to an empty slice, i.e. `taints: - []` in the YAML file. This field is solely used - for Node registration.' + description: |- + Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process + it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your control-plane node, set this field to an + empty slice, i.e. `taints: []` in the YAML file. This field is solely used for Node registration. items: - description: The node this Taint is attached to - has the "effect" on any pod that does not tolerate - the Taint. + description: |- + The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. 
properties: effect: - description: Required. The effect of the taint - on pods that do not tolerate the taint. Valid - effects are NoSchedule, PreferNoSchedule and - NoExecute. + description: |- + Required. The effect of the taint on pods + that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. type: string key: description: Required. The taint key to be applied to a node. type: string timeAdded: - description: TimeAdded represents the time at - which the taint was added. It is only written - for NoExecute taints. + description: |- + TimeAdded represents the time at which the taint was added. + It is only written for NoExecute taints. format: date-time type: string value: @@ -2720,34 +2691,29 @@ spec: type: array type: object patches: - description: Patches contains options related to applying - patches to components deployed by kubeadm during "kubeadm - init". The minimum kubernetes version needed to support - Patches is v1.22 + description: |- + Patches contains options related to applying patches to components deployed by kubeadm during + "kubeadm init". The minimum kubernetes version needed to support Patches is v1.22 properties: directory: - description: Directory is a path to a directory that - contains files named "target[suffix][+patchtype].extension". - For example, "kube-apiserver0+merge.yaml" or just - "etcd.json". "target" can be one of "kube-apiserver", - "kube-controller-manager", "kube-scheduler", "etcd". - "patchtype" can be one of "strategic" "merge" or - "json" and they match the patch formats supported - by kubectl. The default "patchtype" is "strategic". - "extension" must be either "json" or "yaml". "suffix" - is an optional string that can be used to determine - which patches are applied first alpha-numerically. - These files can be written into the target directory - via KubeadmConfig.Files which specifies additional - files to be created on the machine, either with - content inline or by referencing a secret. + description: |- + Directory is a path to a directory that contains files named "target[suffix][+patchtype].extension". + For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "target" can be one of + "kube-apiserver", "kube-controller-manager", "kube-scheduler", "etcd". "patchtype" can be one + of "strategic" "merge" or "json" and they match the patch formats supported by kubectl. + The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". + "suffix" is an optional string that can be used to determine which patches are applied + first alpha-numerically. + These files can be written into the target directory via KubeadmConfig.Files which + specifies additional files to be created on the machine, either with content inline or + by referencing a secret. type: string type: object skipPhases: - description: SkipPhases is a list of phases to skip during - command execution. The list of phases can be obtained - with the "kubeadm init --help" command. This option - takes effect only on Kubernetes >=1.22.0. + description: |- + SkipPhases is a list of phases to skip during command execution. + The list of phases can be obtained with the "kubeadm init --help" command. + This option takes effect only on Kubernetes >=1.22.0. items: type: string type: array @@ -2757,21 +2723,23 @@ spec: for the join command properties: apiVersion: - description: 'APIVersion defines the versioned schema - of this representation of an object. 
Servers should - convert recognized schemas to the latest internal value, - and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string caCertPath: - description: 'CACertPath is the path to the SSL certificate - authority used to secure comunications between node - and control-plane. Defaults to "/etc/kubernetes/pki/ca.crt". - TODO: revisit when there is defaulting from k/k' + description: |- + CACertPath is the path to the SSL certificate authority used to + secure communications between node and control-plane. + Defaults to "/etc/kubernetes/pki/ca.crt". + TODO: revisit when there is defaulting from k/k type: string controlPlane: - description: ControlPlane defines the additional control - plane instance to be deployed on the joining node. If - nil, no additional control plane instance will be deployed. + description: |- + ControlPlane defines the additional control plane instance to be deployed on the joining node. + If nil, no additional control plane instance will be deployed. properties: localAPIEndpoint: description: LocalAPIEndpoint represents the endpoint @@ -2783,21 +2751,22 @@ spec: for the API server to advertise. type: string bindPort: - description: BindPort sets the secure port for - the API Server to bind to. Defaults to 6443. + description: |- + BindPort sets the secure port for the API Server to bind to. + Defaults to 6443. format: int32 type: integer type: object type: object discovery: - description: 'Discovery specifies the options for the - kubelet to use during the TLS Bootstrap process TODO: - revisit when there is defaulting from k/k' + description: |- + Discovery specifies the options for the kubelet to use during the TLS Bootstrap process + TODO: revisit when there is defaulting from k/k properties: bootstrapToken: - description: BootstrapToken is used to set the options - for bootstrap token based discovery BootstrapToken - and File are mutually exclusive + description: |- + BootstrapToken is used to set the options for bootstrap token based discovery + BootstrapToken and File are mutually exclusive properties: apiServerEndpoint: description: APIServerEndpoint is an IP or domain @@ -2805,40 +2774,36 @@ spec: be fetched. type: string caCertHashes: - description: 'CACertHashes specifies a set of - public key pins to verify when token-based discovery - is used. The root CA found during discovery - must match one of these values. Specifying an - empty set disables root CA pinning, which can - be unsafe. Each hash is specified as "<type>:<value>", - where the only currently supported type is "sha256". - This is a hex-encoded SHA-256 hash of the Subject - Public Key Info (SPKI) object in DER-encoded - ASN.1. These hashes can be calculated using, - for example, OpenSSL: openssl x509 -pubkey -in - ca.crt openssl rsa -pubin -outform der 2>&/dev/null - | openssl dgst -sha256 -hex' + description: |- + CACertHashes specifies a set of public key pins to verify + when token-based discovery is used. The root CA found during discovery + must match one of these values. Specifying an empty set disables root CA + pinning, which can be unsafe.
Each hash is specified as "<type>:<value>", + where the only currently supported type is "sha256". This is a hex-encoded + SHA-256 hash of the Subject Public Key Info (SPKI) object in DER-encoded + ASN.1. These hashes can be calculated using, for example, OpenSSL: + openssl x509 -pubkey -in ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex items: type: string type: array token: - description: Token is a token used to validate - cluster information fetched from the control-plane. + description: |- + Token is a token used to validate cluster information + fetched from the control-plane. type: string unsafeSkipCAVerification: - description: UnsafeSkipCAVerification allows token-based - discovery without CA verification via CACertHashes. - This can weaken the security of kubeadm since - other nodes can impersonate the control-plane. + description: |- + UnsafeSkipCAVerification allows token-based discovery + without CA verification via CACertHashes. This can weaken + the security of kubeadm since other nodes can impersonate the control-plane. type: boolean required: - token type: object file: - description: File is used to specify a file or URL - to a kubeconfig file from which to load cluster - information BootstrapToken and File are mutually - exclusive + description: |- + File is used to specify a file or URL to a kubeconfig file from which to load cluster information + BootstrapToken and File are mutually exclusive properties: kubeConfigPath: description: KubeConfigPath is used to specify @@ -2852,26 +2817,25 @@ spec: description: Timeout modifies the discovery timeout type: string tlsBootstrapToken: - description: TLSBootstrapToken is a token used for - TLS bootstrapping. If .BootstrapToken is set, this - field is defaulted to .BootstrapToken.Token, but - can be overridden. If .File is set, this field **must - be set** in case the KubeConfigFile does not contain - any other authentication information + description: |- + TLSBootstrapToken is a token used for TLS bootstrapping. + If .BootstrapToken is set, this field is defaulted to .BootstrapToken.Token, but can be overridden. + If .File is set, this field **must be set** in case the KubeConfigFile does not contain any other authentication information type: string type: object kind: - description: 'Kind is a string value representing the - REST resource this object represents. Servers may infer - this from the endpoint the client submits requests to. - Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string nodeRegistration: - description: NodeRegistration holds fields that relate - to registering the new control-plane node to the cluster. - When used in the context of control plane nodes, NodeRegistration - should remain consistent across both InitConfiguration - and JoinConfiguration + description: |- + NodeRegistration holds fields that relate to registering the new control-plane node to the cluster.
+ When used in the context of control plane nodes, NodeRegistration should remain consistent + across both InitConfiguration and JoinConfiguration properties: criSocket: description: CRISocket is used to retrieve container @@ -2886,12 +2850,12 @@ spec: type: string type: array imagePullPolicy: - description: ImagePullPolicy specifies the policy - for image pulling during kubeadm "init" and "join" - operations. The value of this field must be one - of "Always", "IfNotPresent" or "Never". Defaults - to "IfNotPresent". This can be used only with Kubernetes - version equal to 1.22 and later. + description: |- + ImagePullPolicy specifies the policy for image pulling + during kubeadm "init" and "join" operations. The value of + this field must be one of "Always", "IfNotPresent" or + "Never". Defaults to "IfNotPresent". This can be used only + with Kubernetes version equal to 1.22 and later. enum: - Always - IfNotPresent @@ -2900,51 +2864,41 @@ spec: kubeletExtraArgs: additionalProperties: type: string - description: KubeletExtraArgs passes through extra - arguments to the kubelet. The arguments here are - passed to the kubelet command line via the environment - file kubeadm writes at runtime for the kubelet to - source. This overrides the generic base-level configuration - in the kubelet-config-1.X ConfigMap Flags have higher - priority when parsing. These values are local and - specific to the node kubeadm is executing on. + description: |- + KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file + kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap + Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. type: object name: - description: Name is the `.Metadata.Name` field of - the Node API object that will be created in this - `kubeadm init` or `kubeadm join` operation. This - field is also used in the CommonName field of the - kubelet's client certificate to the API server. + description: |- + Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. + This field is also used in the CommonName field of the kubelet's client certificate to the API server. Defaults to the hostname of the node if not provided. type: string taints: - description: 'Taints specifies the taints the Node - API object should be registered with. If this field - is unset, i.e. nil, in the `kubeadm init` process - it will be defaulted to []v1.Taint{''node-role.kubernetes.io/master=""''}. - If you don''t want to taint your control-plane node, - set this field to an empty slice, i.e. `taints: - []` in the YAML file. This field is solely used - for Node registration.' + description: |- + Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process + it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your control-plane node, set this field to an + empty slice, i.e. `taints: []` in the YAML file. This field is solely used for Node registration. items: - description: The node this Taint is attached to - has the "effect" on any pod that does not tolerate - the Taint. + description: |- + The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. 
properties: effect: - description: Required. The effect of the taint - on pods that do not tolerate the taint. Valid - effects are NoSchedule, PreferNoSchedule and - NoExecute. + description: |- + Required. The effect of the taint on pods + that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. type: string key: description: Required. The taint key to be applied to a node. type: string timeAdded: - description: TimeAdded represents the time at - which the taint was added. It is only written - for NoExecute taints. + description: |- + TimeAdded represents the time at which the taint was added. + It is only written for NoExecute taints. format: date-time type: string value: @@ -2958,34 +2912,29 @@ spec: type: array type: object patches: - description: Patches contains options related to applying - patches to components deployed by kubeadm during "kubeadm - join". The minimum kubernetes version needed to support - Patches is v1.22 + description: |- + Patches contains options related to applying patches to components deployed by kubeadm during + "kubeadm join". The minimum kubernetes version needed to support Patches is v1.22 properties: directory: - description: Directory is a path to a directory that - contains files named "target[suffix][+patchtype].extension". - For example, "kube-apiserver0+merge.yaml" or just - "etcd.json". "target" can be one of "kube-apiserver", - "kube-controller-manager", "kube-scheduler", "etcd". - "patchtype" can be one of "strategic" "merge" or - "json" and they match the patch formats supported - by kubectl. The default "patchtype" is "strategic". - "extension" must be either "json" or "yaml". "suffix" - is an optional string that can be used to determine - which patches are applied first alpha-numerically. - These files can be written into the target directory - via KubeadmConfig.Files which specifies additional - files to be created on the machine, either with - content inline or by referencing a secret. + description: |- + Directory is a path to a directory that contains files named "target[suffix][+patchtype].extension". + For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "target" can be one of + "kube-apiserver", "kube-controller-manager", "kube-scheduler", "etcd". "patchtype" can be one + of "strategic" "merge" or "json" and they match the patch formats supported by kubectl. + The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". + "suffix" is an optional string that can be used to determine which patches are applied + first alpha-numerically. + These files can be written into the target directory via KubeadmConfig.Files which + specifies additional files to be created on the machine, either with content inline or + by referencing a secret. type: string type: object skipPhases: - description: SkipPhases is a list of phases to skip during - command execution. The list of phases can be obtained - with the "kubeadm init --help" command. This option - takes effect only on Kubernetes >=1.22.0. + description: |- + SkipPhases is a list of phases to skip during command execution. + The list of phases can be obtained with the "kubeadm init --help" command. + This option takes effect only on Kubernetes >=1.22.0. items: type: string type: array @@ -3025,17 +2974,24 @@ spec: type: string type: array useExperimentalRetryJoin: - description: "UseExperimentalRetryJoin replaces a basic kubeadm - command with a shell script with retries for joins. 
\n This - is meant to be an experimental temporary workaround on some - environments where joins fail due to timing (and other issues). - The long term goal is to add retries to kubeadm proper and - use that functionality. \n This will add about 40KB to userdata - \n For more information, refer to https://github.com/kubernetes-sigs/cluster-api/pull/2763#discussion_r397306055. - \n Deprecated: This experimental fix is no longer needed - and this field will be removed in a future release. When - removing also remove from staticcheck exclude-rules for - SA1019 in golangci.yml" + description: |- + UseExperimentalRetryJoin replaces a basic kubeadm command with a shell + script with retries for joins. + + + This is meant to be an experimental temporary workaround on some environments + where joins fail due to timing (and other issues). The long term goal is to add retries to + kubeadm proper and use that functionality. + + + This will add about 40KB to userdata + + + For more information, refer to https://github.com/kubernetes-sigs/cluster-api/pull/2763#discussion_r397306055. + + + Deprecated: This experimental fix is no longer needed and this field will be removed in a future release. + When removing also remove from staticcheck exclude-rules for SA1019 in golangci.yml type: boolean users: description: Users specifies extra users to add @@ -3114,8 +3070,9 @@ spec: type: object type: array verbosity: - description: Verbosity is the number for the kubeadm log level - verbosity. It overrides the `--v` flag in kubeadm commands. + description: |- + Verbosity is the number for the kubeadm log level verbosity. + It overrides the `--v` flag in kubeadm commands. format: int32 type: integer type: object diff --git a/bootstrap/kubeadm/config/crd/kustomization.yaml b/bootstrap/kubeadm/config/crd/kustomization.yaml index f273fbabc564..05d0c30aa081 100644 --- a/bootstrap/kubeadm/config/crd/kustomization.yaml +++ b/bootstrap/kubeadm/config/crd/kustomization.yaml @@ -1,29 +1,29 @@ -commonLabels: - cluster.x-k8s.io/v1alpha3: v1alpha3 - cluster.x-k8s.io/v1alpha4: v1alpha4 - cluster.x-k8s.io/v1beta1: v1beta1 +labels: +- includeSelectors: true + pairs: + cluster.x-k8s.io/v1beta1: v1beta1 # This kustomization.yaml is not intended to be run by itself, # since it depends on service name and namespace that are out of this kustomize package. # It should be run by config/ resources: - - bases/bootstrap.cluster.x-k8s.io_kubeadmconfigs.yaml - - bases/bootstrap.cluster.x-k8s.io_kubeadmconfigtemplates.yaml +- bases/bootstrap.cluster.x-k8s.io_kubeadmconfigs.yaml +- bases/bootstrap.cluster.x-k8s.io_kubeadmconfigtemplates.yaml # +kubebuilder:scaffold:crdkustomizeresource -patchesStrategicMerge: +patches: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. # patches here are for enabling the conversion webhook for each CRD -- patches/webhook_in_kubeadmconfigs.yaml -- patches/webhook_in_kubeadmconfigtemplates.yaml +- path: patches/webhook_in_kubeadmconfigs.yaml +- path: patches/webhook_in_kubeadmconfigtemplates.yaml # +kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. 
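# Note on the syntax above: kustomize v5 deprecates `commonLabels` in favor of
# `labels` (where `includeSelectors: true` preserves the old behavior of also
# injecting the pairs into selectors) and `patchesStrategicMerge` in favor of
# `patches` entries with an explicit `path:` key.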
# patches here are for enabling the CA injection for each CRD -- patches/cainjection_in_kubeadmconfigs.yaml -- patches/cainjection_in_kubeadmconfigtemplates.yaml +- path: patches/cainjection_in_kubeadmconfigs.yaml +- path: patches/cainjection_in_kubeadmconfigtemplates.yaml # +kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. configurations: - - kustomizeconfig.yaml +- kustomizeconfig.yaml diff --git a/bootstrap/kubeadm/config/crd/kustomizeconfig.yaml b/bootstrap/kubeadm/config/crd/kustomizeconfig.yaml index e3fd575d604b..d10c3471df21 100644 --- a/bootstrap/kubeadm/config/crd/kustomizeconfig.yaml +++ b/bootstrap/kubeadm/config/crd/kustomizeconfig.yaml @@ -13,5 +13,3 @@ namespace: path: spec/conversion/webhook/clientConfig/service/namespace create: false -varReference: - - path: metadata/annotations diff --git a/bootstrap/kubeadm/config/crd/patches/cainjection_in_kubeadmconfigs.yaml b/bootstrap/kubeadm/config/crd/patches/cainjection_in_kubeadmconfigs.yaml index 0e7d35ae87fa..fb0a62fe2008 100644 --- a/bootstrap/kubeadm/config/crd/patches/cainjection_in_kubeadmconfigs.yaml +++ b/bootstrap/kubeadm/config/crd/patches/cainjection_in_kubeadmconfigs.yaml @@ -4,5 +4,5 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME name: kubeadmconfigs.bootstrap.cluster.x-k8s.io diff --git a/bootstrap/kubeadm/config/crd/patches/cainjection_in_kubeadmconfigtemplates.yaml b/bootstrap/kubeadm/config/crd/patches/cainjection_in_kubeadmconfigtemplates.yaml index fe6b5adf60c6..a8bc5dcf1d26 100644 --- a/bootstrap/kubeadm/config/crd/patches/cainjection_in_kubeadmconfigtemplates.yaml +++ b/bootstrap/kubeadm/config/crd/patches/cainjection_in_kubeadmconfigtemplates.yaml @@ -4,5 +4,5 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME name: kubeadmconfigtemplates.bootstrap.cluster.x-k8s.io diff --git a/bootstrap/kubeadm/config/crd/patches/webhook_in_kubeadmconfigs.yaml b/bootstrap/kubeadm/config/crd/patches/webhook_in_kubeadmconfigs.yaml index 11214bd21e27..372185f889fc 100644 --- a/bootstrap/kubeadm/config/crd/patches/webhook_in_kubeadmconfigs.yaml +++ b/bootstrap/kubeadm/config/crd/patches/webhook_in_kubeadmconfigs.yaml @@ -15,5 +15,5 @@ spec: caBundle: Cg== service: namespace: system - name: webhook-service + name: capi-kubeadm-bootstrap-webhook-service path: /convert diff --git a/bootstrap/kubeadm/config/default/kustomization.yaml b/bootstrap/kubeadm/config/default/kustomization.yaml index 340ed757c040..47a348068263 100644 --- a/bootstrap/kubeadm/config/default/kustomization.yaml +++ b/bootstrap/kubeadm/config/default/kustomization.yaml @@ -3,55 +3,122 @@ namespace: capi-kubeadm-bootstrap-system namePrefix: capi-kubeadm-bootstrap- -commonLabels: - cluster.x-k8s.io/provider: "bootstrap-kubeadm" +labels: +- includeSelectors: true + pairs: + cluster.x-k8s.io/provider: bootstrap-kubeadm resources: - namespace.yaml - -bases: - ../crd - ../rbac - ../manager - ../webhook - ../certmanager -patchesStrategicMerge: - # Provide customizable hook for make targets. - - manager_image_patch.yaml - - manager_pull_policy.yaml - # Enable webhook. 
- - manager_webhook_patch.yaml - # Inject certificate in the webhook definition. - - webhookcainjection_patch.yaml - -vars: - - name: CERTIFICATE_NAMESPACE # namespace of the certificate CR - objref: - kind: Certificate - group: cert-manager.io - version: v1 - name: serving-cert # this name should match the one in certificate.yaml - fieldref: - fieldpath: metadata.namespace - - name: CERTIFICATE_NAME - objref: - kind: Certificate - group: cert-manager.io - version: v1 - name: serving-cert # this name should match the one in certificate.yaml - - name: SERVICE_NAMESPACE # namespace of the service - objref: - kind: Service - version: v1 - name: webhook-service - fieldref: - fieldpath: metadata.namespace - - name: SERVICE_NAME - objref: - kind: Service - version: v1 - name: webhook-service +patches: +# Provide customizable hook for make targets. +- path: manager_image_patch.yaml +- path: manager_pull_policy.yaml +# Enable webhook. +- path: manager_webhook_patch.yaml +# Inject certificate in the webhook definition. +- path: webhookcainjection_patch.yaml -configurations: - - kustomizeconfig.yaml +replacements: +- source: # Add cert-manager annotation to ValidatingWebhookConfiguration, MutatingWebhookConfiguration and CRDs + kind: Certificate + group: cert-manager.io + version: v1 + name: serving-cert # this name should match the one in certificate.yaml + fieldPath: .metadata.namespace # namespace of the certificate CR + targets: + - select: + kind: ValidatingWebhookConfiguration + fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + delimiter: '/' + index: 0 + create: true + - select: + kind: MutatingWebhookConfiguration + fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + delimiter: '/' + index: 0 + create: true + - select: + kind: CustomResourceDefinition + fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + delimiter: '/' + index: 0 + create: true +- source: + kind: Certificate + group: cert-manager.io + version: v1 + name: serving-cert # this name should match the one in certificate.yaml + fieldPath: .metadata.name + targets: + - select: + kind: ValidatingWebhookConfiguration + fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + delimiter: '/' + index: 1 + create: true + - select: + kind: MutatingWebhookConfiguration + fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + delimiter: '/' + index: 1 + create: true + - select: + kind: CustomResourceDefinition + fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + delimiter: '/' + index: 1 + create: true +- source: # Add cert-manager annotation to the webhook Service + kind: Service + version: v1 + name: webhook-service + fieldPath: .metadata.name # name of the service + targets: + - select: + kind: Certificate + group: cert-manager.io + version: v1 + fieldPaths: + - .spec.dnsNames.0 + - .spec.dnsNames.1 + options: + delimiter: '.' + index: 0 + create: true +- source: + kind: Service + version: v1 + name: webhook-service + fieldPath: .metadata.namespace # namespace of the service + targets: + - select: + kind: Certificate + group: cert-manager.io + version: v1 + fieldPaths: + - .spec.dnsNames.0 + - .spec.dnsNames.1 + options: + delimiter: '.'
+ index: 1 + create: true diff --git a/bootstrap/kubeadm/config/default/kustomizeconfig.yaml b/bootstrap/kubeadm/config/default/kustomizeconfig.yaml deleted file mode 100644 index eb191e64d056..000000000000 --- a/bootstrap/kubeadm/config/default/kustomizeconfig.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# This configuration is for teaching kustomize how to update name ref and var substitution -varReference: -- kind: Deployment - path: spec/template/spec/volumes/secret/secretName diff --git a/bootstrap/kubeadm/config/default/manager_webhook_patch.yaml b/bootstrap/kubeadm/config/default/manager_webhook_patch.yaml index bccef6d70db8..d5f36ed1f67d 100644 --- a/bootstrap/kubeadm/config/default/manager_webhook_patch.yaml +++ b/bootstrap/kubeadm/config/default/manager_webhook_patch.yaml @@ -19,4 +19,4 @@ spec: volumes: - name: cert secret: - secretName: $(SERVICE_NAME)-cert + secretName: capi-kubeadm-bootstrap-webhook-service-cert diff --git a/bootstrap/kubeadm/config/default/webhookcainjection_patch.yaml b/bootstrap/kubeadm/config/default/webhookcainjection_patch.yaml index 04c08d027f3d..b94de4bd6bf0 100644 --- a/bootstrap/kubeadm/config/default/webhookcainjection_patch.yaml +++ b/bootstrap/kubeadm/config/default/webhookcainjection_patch.yaml @@ -4,11 +4,11 @@ kind: MutatingWebhookConfiguration metadata: name: mutating-webhook-configuration annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME --- apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration metadata: name: validating-webhook-configuration annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME diff --git a/bootstrap/kubeadm/config/manager/manager.yaml b/bootstrap/kubeadm/config/manager/manager.yaml index d54386179159..f96899a5fe7e 100644 --- a/bootstrap/kubeadm/config/manager/manager.yaml +++ b/bootstrap/kubeadm/config/manager/manager.yaml @@ -20,15 +20,32 @@ spec: - /manager args: - "--leader-elect" - - "--metrics-bind-addr=localhost:8080" - - "--feature-gates=MachinePool=${EXP_MACHINE_POOL:=false},KubeadmBootstrapFormatIgnition=${EXP_KUBEADM_BOOTSTRAP_FORMAT_IGNITION:=false}" + - "--diagnostics-address=${CAPI_DIAGNOSTICS_ADDRESS:=:8443}" + - "--insecure-diagnostics=${CAPI_INSECURE_DIAGNOSTICS:=false}" + - "--feature-gates=MachinePool=${EXP_MACHINE_POOL:=true},KubeadmBootstrapFormatIgnition=${EXP_KUBEADM_BOOTSTRAP_FORMAT_IGNITION:=false}" - "--bootstrap-token-ttl=${KUBEADM_BOOTSTRAP_TOKEN_TTL:=15m}" image: controller:latest name: manager + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid ports: - containerPort: 9440 name: healthz protocol: TCP + - containerPort: 8443 + name: metrics + protocol: TCP readinessProbe: httpGet: path: /readyz @@ -45,6 +62,7 @@ spec: privileged: false runAsUser: 65532 runAsGroup: 65532 + terminationMessagePolicy: FallbackToLogsOnError terminationGracePeriodSeconds: 10 serviceAccountName: manager tolerations: diff --git a/bootstrap/kubeadm/config/rbac/role.yaml b/bootstrap/kubeadm/config/rbac/role.yaml index df06d19c536f..bf38a7dd6bd3 100644 --- a/bootstrap/kubeadm/config/rbac/role.yaml +++ b/bootstrap/kubeadm/config/rbac/role.yaml @@ -2,14 +2,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: 
ClusterRole metadata: - creationTimestamp: null name: manager-role rules: - apiGroups: - "" resources: - configmaps - - events - secrets verbs: - create @@ -19,11 +17,22 @@ rules: - patch - update - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create - apiGroups: - bootstrap.cluster.x-k8s.io resources: - kubeadmconfigs - - kubeadmconfigs/finalizers - kubeadmconfigs/status verbs: - create @@ -47,3 +56,9 @@ rules: - get - list - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create diff --git a/bootstrap/kubeadm/config/webhook/kustomizeconfig.yaml b/bootstrap/kubeadm/config/webhook/kustomizeconfig.yaml index 25e21e3c963f..6d782eb8e350 100644 --- a/bootstrap/kubeadm/config/webhook/kustomizeconfig.yaml +++ b/bootstrap/kubeadm/config/webhook/kustomizeconfig.yaml @@ -21,5 +21,3 @@ namespace: path: webhooks/clientConfig/service/namespace create: true -varReference: -- path: metadata/annotations diff --git a/bootstrap/kubeadm/config/webhook/manifests.yaml b/bootstrap/kubeadm/config/webhook/manifests.yaml index 26a66f4b590c..e7ffd80e2cd8 100644 --- a/bootstrap/kubeadm/config/webhook/manifests.yaml +++ b/bootstrap/kubeadm/config/webhook/manifests.yaml @@ -2,7 +2,6 @@ apiVersion: admissionregistration.k8s.io/v1 kind: MutatingWebhookConfiguration metadata: - creationTimestamp: null name: mutating-webhook-configuration webhooks: - admissionReviewVersions: @@ -51,7 +50,6 @@ webhooks: apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration metadata: - creationTimestamp: null name: validating-webhook-configuration webhooks: - admissionReviewVersions: diff --git a/bootstrap/kubeadm/controllers/alias.go b/bootstrap/kubeadm/controllers/alias.go index 7f8b99b136ca..7c9a2aff9b0f 100644 --- a/bootstrap/kubeadm/controllers/alias.go +++ b/bootstrap/kubeadm/controllers/alias.go @@ -25,6 +25,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller" kubeadmbootstrapcontrollers "sigs.k8s.io/cluster-api/bootstrap/kubeadm/internal/controllers" + "sigs.k8s.io/cluster-api/controllers/remote" ) // Following types provides access to reconcilers implemented in internal/controllers, thus @@ -37,7 +38,10 @@ const ( // KubeadmConfigReconciler reconciles a KubeadmConfig object. type KubeadmConfigReconciler struct { - Client client.Client + Client client.Client + SecretCachingClient client.Client + + Tracker *remote.ClusterCacheTracker // WatchFilterValue is the label value used to filter events prior to reconciliation. WatchFilterValue string @@ -49,8 +53,10 @@ type KubeadmConfigReconciler struct { // SetupWithManager sets up the reconciler with the Manager. func (r *KubeadmConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { return (&kubeadmbootstrapcontrollers.KubeadmConfigReconciler{ - Client: r.Client, - WatchFilterValue: r.WatchFilterValue, - TokenTTL: r.TokenTTL, + Client: r.Client, + SecretCachingClient: r.SecretCachingClient, + Tracker: r.Tracker, + WatchFilterValue: r.WatchFilterValue, + TokenTTL: r.TokenTTL, }).SetupWithManager(ctx, mgr, options) } diff --git a/bootstrap/kubeadm/internal/cloudinit/cloudinit_test.go b/bootstrap/kubeadm/internal/cloudinit/cloudinit_test.go index 95059b1775df..626d2479888d 100644 --- a/bootstrap/kubeadm/internal/cloudinit/cloudinit_test.go +++ b/bootstrap/kubeadm/internal/cloudinit/cloudinit_test.go @@ -20,7 +20,7 @@ import ( "testing" . 
"github.com/onsi/gomega" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/util/certs" @@ -68,7 +68,7 @@ func TestNewInitControlPlaneAdditionalFileEncodings(t *testing.T) { } out, err := NewInitControlPlane(cpinput) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) expectedFiles := []string{ `- path: /tmp/my-path @@ -114,7 +114,7 @@ func TestNewInitControlPlaneCommands(t *testing.T) { } out, err := NewInitControlPlane(cpinput) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) expectedCommands := []string{ `"\"echo $(date) ': hello world!'\""`, @@ -141,8 +141,8 @@ func TestNewInitControlPlaneDiskMounts(t *testing.T) { { Device: "test-device", Layout: true, - Overwrite: pointer.Bool(false), - TableType: pointer.String("gpt"), + Overwrite: ptr.To(false), + TableType: ptr.To("gpt"), }, }, Filesystems: []bootstrapv1.Filesystem{ @@ -164,7 +164,7 @@ func TestNewInitControlPlaneDiskMounts(t *testing.T) { } out, err := NewInitControlPlane(cpinput) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) expectedDiskSetup := `disk_setup: test-device: @@ -224,7 +224,7 @@ func TestNewJoinControlPlaneAdditionalFileEncodings(t *testing.T) { } out, err := NewJoinControlPlane(cpinput) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) expectedFiles := []string{ `- path: /tmp/my-path @@ -266,7 +266,7 @@ func TestNewJoinControlPlaneExperimentalRetry(t *testing.T) { } out, err := NewJoinControlPlane(cpinput) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) expectedFiles := []string{ `- path: ` + retriableJoinScriptName + ` diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go index e12305a9c14e..e1b2e7da5389 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go @@ -22,7 +22,7 @@ import ( "strconv" "time" - "github.com/blang/semver" + "github.com/blang/semver/v4" "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" @@ -31,13 +31,15 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" + bootstrapapi "k8s.io/cluster-bootstrap/token/api" + bootstrapsecretutil "k8s.io/cluster-bootstrap/util/secrets" "k8s.io/klog/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/source" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" @@ -49,6 +51,7 @@ import ( "sigs.k8s.io/cluster-api/controllers/remote" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/feature" + "sigs.k8s.io/cluster-api/internal/util/taints" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/conditions" @@ -58,11 +61,6 @@ import ( "sigs.k8s.io/cluster-api/util/secret" ) -const ( - // KubeadmConfigControllerName defines the controller used when creating clients. 
- KubeadmConfigControllerName = "kubeadmconfig-controller" -) - const ( // DefaultTokenTTL is the default TTL used for tokens. DefaultTokenTTL = 15 * time.Minute @@ -74,22 +72,23 @@ type InitLocker interface { Unlock(ctx context.Context, cluster *clusterv1.Cluster) bool } -// +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=kubeadmconfigs;kubeadmconfigs/status;kubeadmconfigs/finalizers,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=kubeadmconfigs;kubeadmconfigs/status,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status;machinesets;machines;machines/status;machinepools;machinepools/status,verbs=get;list;watch -// +kubebuilder:rbac:groups="",resources=secrets;events;configmaps,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=secrets;configmaps,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=events,verbs=create // KubeadmConfigReconciler reconciles a KubeadmConfig object. type KubeadmConfigReconciler struct { - Client client.Client - KubeadmInitLock InitLocker + Client client.Client + SecretCachingClient client.Client + Tracker *remote.ClusterCacheTracker + KubeadmInitLock InitLocker // WatchFilterValue is the label value used to filter events prior to reconciliation. WatchFilterValue string // TokenTTL is the amount of time a bootstrap token (and therefore a KubeadmConfig) will be valid. TokenTTL time.Duration - - remoteClientGetter remote.ClusterClientGetter } // Scope is a scoped struct used during reconciliation. @@ -105,9 +104,6 @@ func (r *KubeadmConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl if r.KubeadmInitLock == nil { r.KubeadmInitLock = locking.NewControlPlaneInitMutex(mgr.GetClient()) } - if r.remoteClientGetter == nil { - r.remoteClientGetter = remote.NewClusterClient - } if r.TokenTTL == 0 { r.TokenTTL = DefaultTokenTTL } @@ -116,34 +112,31 @@ func (r *KubeadmConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl For(&bootstrapv1.KubeadmConfig{}). WithOptions(options). 
Watches( - &source.Kind{Type: &clusterv1.Machine{}}, + &clusterv1.Machine{}, handler.EnqueueRequestsFromMapFunc(r.MachineToBootstrapMapFunc), ).WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)) if feature.Gates.Enabled(feature.MachinePool) { b = b.Watches( - &source.Kind{Type: &expv1.MachinePool{}}, + &expv1.MachinePool{}, handler.EnqueueRequestsFromMapFunc(r.MachinePoolToBootstrapMapFunc), - ).WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)) - } - - c, err := b.Build(r) - if err != nil { - return errors.Wrap(err, "failed setting up with a controller manager") + ) } - err = c.Watch( - &source.Kind{Type: &clusterv1.Cluster{}}, + b = b.Watches( + &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(r.ClusterToKubeadmConfigs), - predicates.All(ctrl.LoggerFrom(ctx), - predicates.ClusterUnpausedAndInfrastructureReady(ctrl.LoggerFrom(ctx)), - predicates.ResourceHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue), + builder.WithPredicates( + predicates.All(ctrl.LoggerFrom(ctx), + predicates.ClusterUnpausedAndInfrastructureReady(ctrl.LoggerFrom(ctx)), + predicates.ResourceHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue), + ), ), ) - if err != nil { - return errors.Wrap(err, "failed adding Watch for Clusters to controller manager") - } + if err := b.Complete(r); err != nil { + return errors.Wrap(err, "failed setting up with a controller manager") + } return nil } @@ -151,37 +144,38 @@ func (r *KubeadmConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, rerr error) { log := ctrl.LoggerFrom(ctx) - // Lookup the kubeadm config + // Look up the kubeadm config config := &bootstrapv1.KubeadmConfig{} if err := r.Client.Get(ctx, req.NamespacedName, config); err != nil { if apierrors.IsNotFound(err) { return ctrl.Result{}, nil } - log.Error(err, "Failed to get config") - return ctrl.Result{}, err - } - - // AddOwners adds the owners of KubeadmConfig as k/v pairs to the logger. - // Specifically, it will add KubeadmControlPlane, MachineSet and MachineDeployment. - ctx, log, err := clog.AddOwners(ctx, r.Client, config) - if err != nil { return ctrl.Result{}, err } // Look up the owner of this kubeadm config if there is one - configOwner, err := bsutil.GetConfigOwner(ctx, r.Client, config) + configOwner, err := bsutil.GetTypedConfigOwner(ctx, r.Client, config) if apierrors.IsNotFound(err) { // Could not find the owner yet, this is not an error and will rereconcile when the owner gets set. return ctrl.Result{}, nil } if err != nil { - log.Error(err, "Failed to get owner") - return ctrl.Result{}, err + return ctrl.Result{}, errors.Wrapf(err, "failed to get owner") } if configOwner == nil { return ctrl.Result{}, nil } log = log.WithValues(configOwner.GetKind(), klog.KRef(configOwner.GetNamespace(), configOwner.GetName()), "resourceVersion", configOwner.GetResourceVersion()) + ctx = ctrl.LoggerInto(ctx, log) + + if configOwner.GetKind() == "Machine" { + // AddOwners adds the owners of Machine as k/v pairs to the logger. + // Specifically, it will add KubeadmControlPlane, MachineSet and MachineDeployment. 
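+ // (Illustration with hypothetical names: for a Machine owned by MachineSet + // "md-7f5c", itself owned by MachineDeployment "md", the logger gains the + // key/value pairs MachineSet="ns/md-7f5c" and MachineDeployment="ns/md".)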
+ ctx, log, err = clog.AddOwners(ctx, r.Client, configOwner) + if err != nil { + return ctrl.Result{}, err + } + } log = log.WithValues("Cluster", klog.KRef(configOwner.GetNamespace(), configOwner.ClusterName())) ctx = ctrl.LoggerInto(ctx, log) @@ -235,12 +229,28 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{}) } if err := patchHelper.Patch(ctx, config, patchOpts...); err != nil { - log.Error(rerr, "Failed to patch config") - if rerr == nil { - rerr = err - } + rerr = kerrors.NewAggregate([]error{rerr, err}) } }() + + // Ignore deleted KubeadmConfigs. + if !config.DeletionTimestamp.IsZero() { + return ctrl.Result{}, nil + } + + res, err := r.reconcile(ctx, scope, cluster, config, configOwner) + if err != nil && errors.Is(err, remote.ErrClusterLocked) { + // Requeue if the reconcile failed because the ClusterCacheTracker was locked for + // the current cluster because of concurrent access. + log.V(5).Info("Requeuing because another worker has the lock on the ClusterCacheTracker") + return ctrl.Result{RequeueAfter: time.Minute}, nil + } + return res, err +} + +func (r *KubeadmConfigReconciler) reconcile(ctx context.Context, scope *Scope, cluster *clusterv1.Cluster, config *bootstrapv1.KubeadmConfig, configOwner *bsutil.ConfigOwner) (ctrl.Result, error) { + log := ctrl.LoggerFrom(ctx) + // Ensure the bootstrap secret associated with this KubeadmConfig has the correct ownerReference. if err := r.ensureBootstrapSecretOwnersRef(ctx, scope); err != nil { return ctrl.Result{}, err @@ -265,7 +275,7 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques // If the BootstrapToken has been generated for a join but the config owner has no nodeRefs, // this indicates that the node has not yet joined and the token in the join config has not // been consumed and it may need a refresh. 
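// (Worked example, assuming the default TokenTTL of 15m: refreshBootstrapTokenIfNeeded // below requeues the config every 15m/3 = 5m and rewrites the token secret once fewer // than 15m*5/6 = 12m30s of validity remain, so at least two reconcile runs can attempt // a refresh before the token expires.)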
- return r.refreshBootstrapToken(ctx, config, cluster) + return r.refreshBootstrapTokenIfNeeded(ctx, config, cluster) } if configOwner.IsMachinePool() { // If the BootstrapToken has been generated and infrastructure is ready but the configOwner is a MachinePool, @@ -303,29 +313,58 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques return r.joinWorker(ctx, scope) } -func (r *KubeadmConfigReconciler) refreshBootstrapToken(ctx context.Context, config *bootstrapv1.KubeadmConfig, cluster *clusterv1.Cluster) (ctrl.Result, error) { +func (r *KubeadmConfigReconciler) refreshBootstrapTokenIfNeeded(ctx context.Context, config *bootstrapv1.KubeadmConfig, cluster *clusterv1.Cluster) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) token := config.Spec.JoinConfiguration.Discovery.BootstrapToken.Token - remoteClient, err := r.remoteClientGetter(ctx, KubeadmConfigControllerName, r.Client, util.ObjectKey(cluster)) + remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster)) if err != nil { - log.Error(err, "Error creating remote cluster client") return ctrl.Result{}, err } - log.Info("Refreshing token until the infrastructure has a chance to consume it") - if err := refreshToken(ctx, remoteClient, token, r.TokenTTL); err != nil { + secret, err := getToken(ctx, remoteClient, token) + if err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to get bootstrap token secret in order to refresh it") + } + log = log.WithValues("Secret", klog.KObj(secret)) + + secretExpiration := bootstrapsecretutil.GetData(secret, bootstrapapi.BootstrapTokenExpirationKey) + if secretExpiration == "" { + log.Info(fmt.Sprintf("Token has no valid value for %s, writing new expiration timestamp", bootstrapapi.BootstrapTokenExpirationKey)) + } else { + // Assuming UTC, since we create the label value with that timezone + expiration, err := time.Parse(time.RFC3339, secretExpiration) + if err != nil { + return ctrl.Result{}, errors.Wrapf(err, "can't parse expiration time of bootstrap token") + } + + now := time.Now().UTC() + skipTokenRefreshIfExpiringAfter := now.Add(r.skipTokenRefreshIfExpiringAfter()) + if expiration.After(skipTokenRefreshIfExpiringAfter) { + log.V(3).Info("Token needs no refresh", "tokenExpiresInSeconds", expiration.Sub(now).Seconds()) + return ctrl.Result{ + RequeueAfter: r.tokenCheckRefreshOrRotationInterval(), + }, nil + } + } + + // Extend TTL for existing token + newExpiration := time.Now().UTC().Add(r.TokenTTL).Format(time.RFC3339) + secret.Data[bootstrapapi.BootstrapTokenExpirationKey] = []byte(newExpiration) + log.Info("Refreshing token until the infrastructure has a chance to consume it", "oldExpiration", secretExpiration, "newExpiration", newExpiration) + err = remoteClient.Update(ctx, secret) + if err != nil { return ctrl.Result{}, errors.Wrapf(err, "failed to refresh bootstrap token") } return ctrl.Result{ - RequeueAfter: r.TokenTTL / 2, + RequeueAfter: r.tokenCheckRefreshOrRotationInterval(), }, nil } func (r *KubeadmConfigReconciler) rotateMachinePoolBootstrapToken(ctx context.Context, config *bootstrapv1.KubeadmConfig, cluster *clusterv1.Cluster, scope *Scope) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) log.V(2).Info("Config is owned by a MachinePool, checking if token should be rotated") - remoteClient, err := r.remoteClientGetter(ctx, KubeadmConfigControllerName, r.Client, util.ObjectKey(cluster)) + remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster)) if err != nil { return ctrl.Result{}, err } @@ -349,7 +388,7 @@ func (r 
*KubeadmConfigReconciler) rotateMachinePoolBootstrapToken(ctx context.Co return r.joinWorker(ctx, scope) } return ctrl.Result{ - RequeueAfter: r.TokenTTL / 3, + RequeueAfter: r.tokenCheckRefreshOrRotationInterval(), }, nil } @@ -410,11 +449,12 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex if scope.Config.Spec.InitConfiguration == nil { scope.Config.Spec.InitConfiguration = &bootstrapv1.InitConfiguration{ TypeMeta: metav1.TypeMeta{ - APIVersion: "kubeadm.k8s.io/v1beta1", + APIVersion: "kubeadm.k8s.io/v1beta3", Kind: "InitConfiguration", }, } } + initdata, err := kubeadmtypes.MarshalInitConfigurationForVersion(scope.Config.Spec.InitConfiguration, parsedVersion) if err != nil { scope.Error(err, "Failed to marshal init configuration") @@ -424,7 +464,7 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex if scope.Config.Spec.ClusterConfiguration == nil { scope.Config.Spec.ClusterConfiguration = &bootstrapv1.ClusterConfiguration{ TypeMeta: metav1.TypeMeta{ - APIVersion: "kubeadm.k8s.io/v1beta1", + APIVersion: "kubeadm.k8s.io/v1beta3", Kind: "ClusterConfiguration", }, } @@ -445,13 +485,15 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex // Otherwise rely on certificates generated by the ControlPlane controller. // Note: A cluster does not have a ControlPlane reference when using standalone CP machines. if scope.Cluster.Spec.ControlPlaneRef == nil { - err = certificates.LookupOrGenerate( + err = certificates.LookupOrGenerateCached( ctx, + r.SecretCachingClient, r.Client, util.ObjectKey(scope.Cluster), *metav1.NewControllerRef(scope.Config, bootstrapv1.GroupVersion.WithKind("KubeadmConfig"))) } else { - err = certificates.Lookup(ctx, + err = certificates.LookupCached(ctx, + r.SecretCachingClient, r.Client, util.ObjectKey(scope.Cluster)) } @@ -523,8 +565,9 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) scope.Info("Creating BootstrapData for the worker node") certificates := secret.NewCertificatesForWorker(scope.Config.Spec.JoinConfiguration.CACertPath) - err := certificates.Lookup( + err := certificates.LookupCached( ctx, + r.SecretCachingClient, r.Client, util.ObjectKey(scope.Cluster), ) @@ -551,7 +594,16 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) return ctrl.Result{}, errors.Wrapf(err, "failed to parse kubernetes version %q", kubernetesVersion) } - joinData, err := kubeadmtypes.MarshalJoinConfigurationForVersion(scope.Config.Spec.JoinConfiguration, parsedVersion) + // Add the node uninitialized taint to the list of taints. + // DeepCopy the JoinConfiguration to prevent updating the actual KubeadmConfig. + // Do not modify the KubeadmConfig in etcd as this is a temporary taint that will be dropped after the node + // is initialized by ClusterAPI. 
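+ // (For context: clusterv1.NodeUninitializedTaint is the NoSchedule taint with key + // "node.cluster.x-k8s.io/uninitialized"; the HasTaint check below keeps the append + // idempotent when the user-provided JoinConfiguration already carries the taint.)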
+ joinConfiguration := scope.Config.Spec.JoinConfiguration.DeepCopy() + if !taints.HasTaint(joinConfiguration.NodeRegistration.Taints, clusterv1.NodeUninitializedTaint) { + joinConfiguration.NodeRegistration.Taints = append(joinConfiguration.NodeRegistration.Taints, clusterv1.NodeUninitializedTaint) + } + + joinData, err := kubeadmtypes.MarshalJoinConfigurationForVersion(joinConfiguration, parsedVersion) if err != nil { scope.Error(err, "Failed to marshal join configuration") return ctrl.Result{}, err @@ -613,7 +665,9 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) scope.Error(err, "Failed to store bootstrap data") return ctrl.Result{}, err } - return ctrl.Result{}, nil + + // Ensure reconciling this object again so we keep refreshing the bootstrap token until it is consumed + return ctrl.Result{RequeueAfter: r.tokenCheckRefreshOrRotationInterval()}, nil } func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *Scope) (ctrl.Result, error) { @@ -628,8 +682,9 @@ func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *S } certificates := secret.NewControlPlaneJoinCerts(scope.Config.Spec.ClusterConfiguration) - err := certificates.Lookup( + err := certificates.LookupCached( ctx, + r.SecretCachingClient, r.Client, util.ObjectKey(scope.Cluster), ) @@ -717,7 +772,8 @@ func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *S return ctrl.Result{}, err } - return ctrl.Result{}, nil + // Ensure reconciling this object again so we keep refreshing the bootstrap token until it is consumed + return ctrl.Result{RequeueAfter: r.tokenCheckRefreshOrRotationInterval()}, nil } // resolveFiles maps .Spec.Files into cloudinit.Files, resolving any object references @@ -797,9 +853,30 @@ func (r *KubeadmConfigReconciler) resolveSecretPasswordContent(ctx context.Conte return data, nil } +// skipTokenRefreshIfExpiringAfter returns a duration. If the token's expiry timestamp is after +// `now + skipTokenRefreshIfExpiringAfter()`, it does not yet need a refresh. +func (r *KubeadmConfigReconciler) skipTokenRefreshIfExpiringAfter() time.Duration { + // Choose according to how often reconciliation is "woken up" by `tokenCheckRefreshOrRotationInterval`. + // Reconciliation should get triggered at least two times, i.e. have two chances to refresh the token (in case of + // one temporary failure), while the token is not refreshed. + return r.TokenTTL * 5 / 6 +} + +// tokenCheckRefreshOrRotationInterval defines when to trigger a reconciliation loop again to refresh or rotate a token. +func (r *KubeadmConfigReconciler) tokenCheckRefreshOrRotationInterval() time.Duration { + // This interval defines how often the reconciler should get triggered. + // + // `r.TokenTTL / 3` means reconciliation gets triggered at least 3 times within the expiry time of the token. The + // third call may be too late, so the first/second call have a chance to extend the expiry (refresh/rotate), + // allowing for one temporary failure. + // + // Related to `skipTokenRefreshIfExpiringAfter` and also token rotation (which is different from refreshing). + return r.TokenTTL / 3 +} + // ClusterToKubeadmConfigs is a handler.ToRequestsFunc to be used to enqueue // requests for reconciliation of KubeadmConfigs. 
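// (A sketch of the fan-out, with illustrative names: an event for Cluster "my-cluster" // enqueues one reconcile request per KubeadmConfig referenced by the cluster's Machines // and, when the MachinePool feature gate is enabled, by its MachinePools.)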
-func (r *KubeadmConfigReconciler) ClusterToKubeadmConfigs(o client.Object) []ctrl.Request { +func (r *KubeadmConfigReconciler) ClusterToKubeadmConfigs(ctx context.Context, o client.Object) []ctrl.Request { result := []ctrl.Request{} c, ok := o.(*clusterv1.Cluster) @@ -815,7 +892,7 @@ func (r *KubeadmConfigReconciler) ClusterToKubeadmConfigs(o client.Object) []ctr } machineList := &clusterv1.MachineList{} - if err := r.Client.List(context.TODO(), machineList, selectors...); err != nil { + if err := r.Client.List(ctx, machineList, selectors...); err != nil { return nil } @@ -829,7 +906,7 @@ func (r *KubeadmConfigReconciler) ClusterToKubeadmConfigs(o client.Object) []ctr if feature.Gates.Enabled(feature.MachinePool) { machinePoolList := &expv1.MachinePoolList{} - if err := r.Client.List(context.TODO(), machinePoolList, selectors...); err != nil { + if err := r.Client.List(ctx, machinePoolList, selectors...); err != nil { return nil } @@ -847,7 +924,7 @@ func (r *KubeadmConfigReconciler) ClusterToKubeadmConfigs(o client.Object) []ctr // MachineToBootstrapMapFunc is a handler.ToRequestsFunc to be used to enqueue // request for reconciliation of KubeadmConfig. -func (r *KubeadmConfigReconciler) MachineToBootstrapMapFunc(o client.Object) []ctrl.Request { +func (r *KubeadmConfigReconciler) MachineToBootstrapMapFunc(_ context.Context, o client.Object) []ctrl.Request { m, ok := o.(*clusterv1.Machine) if !ok { panic(fmt.Sprintf("Expected a Machine but got a %T", o)) @@ -863,7 +940,7 @@ func (r *KubeadmConfigReconciler) MachineToBootstrapMapFunc(o client.Object) []c // MachinePoolToBootstrapMapFunc is a handler.ToRequestsFunc to be used to enqueue // request for reconciliation of KubeadmConfig. -func (r *KubeadmConfigReconciler) MachinePoolToBootstrapMapFunc(o client.Object) []ctrl.Request { +func (r *KubeadmConfigReconciler) MachinePoolToBootstrapMapFunc(_ context.Context, o client.Object) []ctrl.Request { m, ok := o.(*expv1.MachinePool) if !ok { panic(fmt.Sprintf("Expected a MachinePool but got a %T", o)) @@ -915,12 +992,12 @@ func (r *KubeadmConfigReconciler) reconcileDiscovery(ctx context.Context, cluste apiServerEndpoint = cluster.Spec.ControlPlaneEndpoint.String() config.Spec.JoinConfiguration.Discovery.BootstrapToken.APIServerEndpoint = apiServerEndpoint - log.V(3).Info("Altering JoinConfiguration.Discovery.BootstrapToken.APIServerEndpoint", "APIServerEndpoint", apiServerEndpoint) + log.V(3).Info("Altering JoinConfiguration.Discovery.BootstrapToken.APIServerEndpoint", "apiServerEndpoint", apiServerEndpoint) } // if BootstrapToken already contains a token, respect it; otherwise create a new bootstrap token for the node to join if config.Spec.JoinConfiguration.Discovery.BootstrapToken.Token == "" { - remoteClient, err := r.remoteClientGetter(ctx, KubeadmConfigControllerName, r.Client, util.ObjectKey(cluster)) + remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster)) if err != nil { return ctrl.Result{}, err } @@ -953,39 +1030,39 @@ func (r *KubeadmConfigReconciler) reconcileTopLevelObjectSettings(ctx context.Co // then use Cluster's ControlPlaneEndpoint as a control plane endpoint for the Kubernetes cluster. 
if config.Spec.ClusterConfiguration.ControlPlaneEndpoint == "" && cluster.Spec.ControlPlaneEndpoint.IsValid() { config.Spec.ClusterConfiguration.ControlPlaneEndpoint = cluster.Spec.ControlPlaneEndpoint.String() - log.V(3).Info("Altering ClusterConfiguration.ControlPlaneEndpoint", "ControlPlaneEndpoint", config.Spec.ClusterConfiguration.ControlPlaneEndpoint) + log.V(3).Info("Altering ClusterConfiguration.ControlPlaneEndpoint", "controlPlaneEndpoint", config.Spec.ClusterConfiguration.ControlPlaneEndpoint) } // If there is no ClusterName defined in ClusterConfiguration, use Cluster.Name if config.Spec.ClusterConfiguration.ClusterName == "" { config.Spec.ClusterConfiguration.ClusterName = cluster.Name - log.V(3).Info("Altering ClusterConfiguration.ClusterName", "ClusterName", config.Spec.ClusterConfiguration.ClusterName) + log.V(3).Info("Altering ClusterConfiguration.ClusterName", "clusterName", config.Spec.ClusterConfiguration.ClusterName) } // If there are no Network settings defined in ClusterConfiguration, use ClusterNetwork settings, if defined if cluster.Spec.ClusterNetwork != nil { if config.Spec.ClusterConfiguration.Networking.DNSDomain == "" && cluster.Spec.ClusterNetwork.ServiceDomain != "" { config.Spec.ClusterConfiguration.Networking.DNSDomain = cluster.Spec.ClusterNetwork.ServiceDomain - log.V(3).Info("Altering ClusterConfiguration.Networking.DNSDomain", "DNSDomain", config.Spec.ClusterConfiguration.Networking.DNSDomain) + log.V(3).Info("Altering ClusterConfiguration.Networking.DNSDomain", "dnsDomain", config.Spec.ClusterConfiguration.Networking.DNSDomain) } if config.Spec.ClusterConfiguration.Networking.ServiceSubnet == "" && cluster.Spec.ClusterNetwork.Services != nil && len(cluster.Spec.ClusterNetwork.Services.CIDRBlocks) > 0 { config.Spec.ClusterConfiguration.Networking.ServiceSubnet = cluster.Spec.ClusterNetwork.Services.String() - log.V(3).Info("Altering ClusterConfiguration.Networking.ServiceSubnet", "ServiceSubnet", config.Spec.ClusterConfiguration.Networking.ServiceSubnet) + log.V(3).Info("Altering ClusterConfiguration.Networking.ServiceSubnet", "serviceSubnet", config.Spec.ClusterConfiguration.Networking.ServiceSubnet) } if config.Spec.ClusterConfiguration.Networking.PodSubnet == "" && cluster.Spec.ClusterNetwork.Pods != nil && len(cluster.Spec.ClusterNetwork.Pods.CIDRBlocks) > 0 { config.Spec.ClusterConfiguration.Networking.PodSubnet = cluster.Spec.ClusterNetwork.Pods.String() - log.V(3).Info("Altering ClusterConfiguration.Networking.PodSubnet", "PodSubnet", config.Spec.ClusterConfiguration.Networking.PodSubnet) + log.V(3).Info("Altering ClusterConfiguration.Networking.PodSubnet", "podSubnet", config.Spec.ClusterConfiguration.Networking.PodSubnet) } } // If there are no KubernetesVersion settings defined in ClusterConfiguration, use Version from machine, if defined if config.Spec.ClusterConfiguration.KubernetesVersion == "" && machine.Spec.Version != nil { config.Spec.ClusterConfiguration.KubernetesVersion = *machine.Spec.Version - log.V(3).Info("Altering ClusterConfiguration.KubernetesVersion", "KubernetesVersion", config.Spec.ClusterConfiguration.KubernetesVersion) + log.V(3).Info("Altering ClusterConfiguration.KubernetesVersion", "kubernetesVersion", config.Spec.ClusterConfiguration.KubernetesVersion) } } @@ -1007,7 +1084,7 @@ func (r *KubeadmConfigReconciler) storeBootstrapData(ctx context.Context, scope Kind: "KubeadmConfig", Name: scope.Config.Name, UID: scope.Config.UID, - Controller: pointer.Bool(true), + Controller: ptr.To(true), }, }, }, @@ -1024,12 +1101,12
@@ func (r *KubeadmConfigReconciler) storeBootstrapData(ctx context.Context, scope if !apierrors.IsAlreadyExists(err) { return errors.Wrapf(err, "failed to create bootstrap data secret for KubeadmConfig %s/%s", scope.Config.Namespace, scope.Config.Name) } - log.Info("bootstrap data secret for KubeadmConfig already exists, updating", "Secret", klog.KObj(secret)) + log.Info("Bootstrap data secret for KubeadmConfig already exists, updating", "Secret", klog.KObj(secret)) if err := r.Client.Update(ctx, secret); err != nil { return errors.Wrapf(err, "failed to update bootstrap data secret for KubeadmConfig %s/%s", scope.Config.Namespace, scope.Config.Name) } } - scope.Config.Status.DataSecretName = pointer.String(secret.Name) + scope.Config.Status.DataSecretName = ptr.To(secret.Name) scope.Config.Status.Ready = true conditions.MarkTrue(scope.Config, bootstrapv1.DataSecretAvailableCondition) return nil @@ -1038,7 +1115,7 @@ func (r *KubeadmConfigReconciler) storeBootstrapData(ctx context.Context, scope // Ensure the bootstrap secret has the KubeadmConfig as a controller OwnerReference. func (r *KubeadmConfigReconciler) ensureBootstrapSecretOwnersRef(ctx context.Context, scope *Scope) error { secret := &corev1.Secret{} - err := r.Client.Get(ctx, client.ObjectKey{Namespace: scope.Config.Namespace, Name: scope.Config.Name}, secret) + err := r.SecretCachingClient.Get(ctx, client.ObjectKey{Namespace: scope.Config.Namespace, Name: scope.Config.Name}, secret) if err != nil { // If the secret has not been created yet return early. if apierrors.IsNotFound(err) { @@ -1051,15 +1128,15 @@ func (r *KubeadmConfigReconciler) ensureBootstrapSecretOwnersRef(ctx context.Con return errors.Wrapf(err, "failed to add KubeadmConfig %s as ownerReference to bootstrap Secret %s", scope.ConfigOwner.GetName(), secret.GetName()) } if c := metav1.GetControllerOf(secret); c != nil && c.Kind != "KubeadmConfig" { - secret.OwnerReferences = util.RemoveOwnerRef(secret.OwnerReferences, *c) + secret.SetOwnerReferences(util.RemoveOwnerRef(secret.GetOwnerReferences(), *c)) } - secret.OwnerReferences = util.EnsureOwnerRef(secret.OwnerReferences, metav1.OwnerReference{ + secret.SetOwnerReferences(util.EnsureOwnerRef(secret.GetOwnerReferences(), metav1.OwnerReference{ APIVersion: bootstrapv1.GroupVersion.String(), Kind: "KubeadmConfig", UID: scope.Config.UID, Name: scope.Config.Name, - Controller: pointer.Bool(true), - }) + Controller: ptr.To(true), + })) err = patchHelper.Patch(ctx, secret) if err != nil { return errors.Wrapf(err, "could not add KubeadmConfig %s as ownerReference to bootstrap Secret %s", scope.ConfigOwner.GetName(), secret.GetName()) diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_reconciler_test.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_reconciler_test.go index 7e855189197e..0021cb928784 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_reconciler_test.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_reconciler_test.go @@ -33,7 +33,7 @@ func TestKubeadmConfigReconciler(t *testing.T) { g := NewWithT(t) ns, err := env.CreateNamespace(ctx, "test-kubeadm-config-reconciler") - g.Expect(err).To(BeNil()) + g.Expect(err).ToNot(HaveOccurred()) cluster := builder.Cluster(ns.Name, "cluster1").Build() g.Expect(env.Create(ctx, cluster)).To(Succeed()) @@ -47,7 +47,8 @@ func TestKubeadmConfigReconciler(t *testing.T) { }(cluster, machine, config, ns) reconciler := KubeadmConfigReconciler{ - Client: env, + Client: env, + 
SecretCachingClient: secretCachingClient, } t.Log("Calling reconcile should requeue") result, err := reconciler.Reconcile(ctx, ctrl.Request{ diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go index 4038c3a16f88..d3147305f4a7 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go @@ -20,27 +20,28 @@ import ( "bytes" "context" "fmt" - "reflect" "testing" "time" ignition "github.com/flatcar/ignition/config/v2_3" + "github.com/go-logr/logr" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" bootstrapapi "k8s.io/cluster-bootstrap/token/api" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/yaml" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" bootstrapbuilder "sigs.k8s.io/cluster-api/bootstrap/kubeadm/internal/builder" - fakeremote "sigs.k8s.io/cluster-api/controllers/remote/fake" + "sigs.k8s.io/cluster-api/controllers/remote" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/internal/test/builder" @@ -76,11 +77,12 @@ func TestKubeadmConfigReconciler_MachineToBootstrapMapFuncReturn(t *testing.T) { } fakeClient := fake.NewClientBuilder().WithObjects(objs...).Build() reconciler := &KubeadmConfigReconciler{ - Client: fakeClient, + Client: fakeClient, + SecretCachingClient: fakeClient, } for i := 0; i < 3; i++ { o := machineObjs[i] - configs := reconciler.MachineToBootstrapMapFunc(o) + configs := reconciler.MachineToBootstrapMapFunc(ctx, o) if i == 1 { g.Expect(configs[0].Name).To(Equal(expectedConfigName)) } else { @@ -108,7 +110,8 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnEarlyIfKubeadmConfigIsReady(t * myclient := fake.NewClientBuilder().WithObjects(objects...).Build() k := &KubeadmConfigReconciler{ - Client: myclient, + Client: myclient, + SecretCachingClient: myclient, } request := ctrl.Request{ @@ -118,7 +121,7 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnEarlyIfKubeadmConfigIsReady(t * }, } result, err := k.Reconcile(ctx, request) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(time.Duration(0))) } @@ -134,12 +137,12 @@ func TestKubeadmConfigReconciler_TestSecretOwnerReferenceReconciliation(t *testi WithClusterName(clusterName). WithBootstrapTemplate(bootstrapbuilder.KubeadmConfig(metav1.NamespaceDefault, "cfg").Unstructured()). 
Build() - machine.Spec.Bootstrap.DataSecretName = pointer.String("something") + machine.Spec.Bootstrap.DataSecretName = ptr.To("something") config := newKubeadmConfig(metav1.NamespaceDefault, "cfg") config.SetOwnerReferences(util.EnsureOwnerRef(config.GetOwnerReferences(), metav1.OwnerReference{ - APIVersion: machine.APIVersion, - Kind: machine.Kind, + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Machine", Name: machine.Name, UID: machine.UID, })) @@ -158,10 +161,11 @@ func TestKubeadmConfigReconciler_TestSecretOwnerReferenceReconciliation(t *testi secret, cluster, } - myclient := fake.NewClientBuilder().WithObjects(objects...).Build() + myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, + Client: myclient, + SecretCachingClient: myclient, } request := ctrl.Request{ @@ -174,9 +178,9 @@ func TestKubeadmConfigReconciler_TestSecretOwnerReferenceReconciliation(t *testi key := client.ObjectKeyFromObject(config) actual := &corev1.Secret{} - t.Run("KubeadmConfig ownerReference is added on first reconcile", func(t *testing.T) { + t.Run("KubeadmConfig ownerReference is added on first reconcile", func(*testing.T) { _, err = k.Reconcile(ctx, request) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(myclient.Get(ctx, key, actual)).To(Succeed()) @@ -186,9 +190,9 @@ func TestKubeadmConfigReconciler_TestSecretOwnerReferenceReconciliation(t *testi g.Expect(controllerOwner.Name).To(Equal(config.Name)) }) - t.Run("KubeadmConfig ownerReference re-reconciled without error", func(t *testing.T) { + t.Run("KubeadmConfig ownerReference re-reconciled without error", func(*testing.T) { _, err = k.Reconcile(ctx, request) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(myclient.Get(ctx, key, actual)).To(Succeed()) @@ -197,21 +201,21 @@ func TestKubeadmConfigReconciler_TestSecretOwnerReferenceReconciliation(t *testi g.Expect(controllerOwner.Kind).To(Equal(config.Kind)) g.Expect(controllerOwner.Name).To(Equal(config.Name)) }) - t.Run("non-KubeadmConfig controller OwnerReference is replaced", func(t *testing.T) { + t.Run("non-KubeadmConfig controller OwnerReference is replaced", func(*testing.T) { g.Expect(myclient.Get(ctx, key, actual)).To(Succeed()) actual.SetOwnerReferences([]metav1.OwnerReference{ { - APIVersion: machine.APIVersion, - Kind: machine.Kind, + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Machine", Name: machine.Name, UID: machine.UID, - Controller: pointer.Bool(true), + Controller: ptr.To(true), }}) g.Expect(myclient.Update(ctx, actual)).To(Succeed()) _, err = k.Reconcile(ctx, request) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(myclient.Get(ctx, key, actual)).To(Succeed()) @@ -239,7 +243,8 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnNilIfReferencedMachineIsNotFoun myclient := fake.NewClientBuilder().WithObjects(objects...).Build() k := &KubeadmConfigReconciler{ - Client: myclient, + Client: myclient, + SecretCachingClient: myclient, } request := ctrl.Request{ @@ -249,7 +254,7 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnNilIfReferencedMachineIsNotFoun }, } _, err := k.Reconcile(ctx, request) - g.Expect(err).To(BeNil()) + g.Expect(err).ToNot(HaveOccurred()) } // If the machine has bootstrap data secret reference, there is no need to generate more bootstrap data. 
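One pattern recurring through the test updates below: fake clients are rebuilt with WithStatusSubresource for every type whose .status the reconciler patches. Controller-runtime v0.15's fake client enforces the status subresource, so status writes fail in tests unless the type is registered. A minimal sketch:

```go
// Minimal sketch: register KubeadmConfig's status subresource on the fake
// client so the reconciler's patch helper can write .status (Ready,
// DataSecretName, conditions) during the test.
myclient := fake.NewClientBuilder().
	WithObjects(objects...).
	WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).
	Build()
```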
@@ -263,7 +268,7 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnEarlyIfMachineHasDataSecretName WithClusterName("cluster1"). WithBootstrapTemplate(bootstrapbuilder.KubeadmConfig(metav1.NamespaceDefault, "cfg").Unstructured()). Build() - machine.Spec.Bootstrap.DataSecretName = pointer.String("something") + machine.Spec.Bootstrap.DataSecretName = ptr.To("something") config := newKubeadmConfig(metav1.NamespaceDefault, "cfg") addKubeadmConfigToMachine(config, machine) @@ -272,10 +277,11 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnEarlyIfMachineHasDataSecretName machine, config, } - myclient := fake.NewClientBuilder().WithObjects(objects...).Build() + myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, + Client: myclient, + SecretCachingClient: myclient, } request := ctrl.Request{ @@ -287,7 +293,7 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnEarlyIfMachineHasDataSecretName result, err := k.Reconcile(ctx, request) actual := &bootstrapv1.KubeadmConfig{} g.Expect(myclient.Get(ctx, client.ObjectKey{Namespace: config.Namespace, Name: config.Name}, actual)).To(Succeed()) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(time.Duration(0))) assertHasTrueCondition(g, myclient, request, bootstrapv1.DataSecretAvailableCondition) @@ -315,10 +321,11 @@ func TestKubeadmConfigReconciler_ReturnEarlyIfClusterInfraNotReady(t *testing.T) machine, config, } - myclient := fake.NewClientBuilder().WithObjects(objects...).Build() + myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, + Client: myclient, + SecretCachingClient: myclient, } request := ctrl.Request{ @@ -330,8 +337,8 @@ func TestKubeadmConfigReconciler_ReturnEarlyIfClusterInfraNotReady(t *testing.T) expectedResult := reconcile.Result{} actualResult, actualError := k.Reconcile(ctx, request) - g.Expect(actualResult).To(Equal(expectedResult)) - g.Expect(actualError).NotTo(HaveOccurred()) + g.Expect(actualResult).To(BeComparableTo(expectedResult)) + g.Expect(actualError).ToNot(HaveOccurred()) assertHasFalseCondition(g, myclient, request, bootstrapv1.DataSecretAvailableCondition, clusterv1.ConditionSeverityInfo, bootstrapv1.WaitingForClusterInfrastructureReason) } @@ -352,7 +359,8 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnEarlyIfMachineHasNoCluster(t *t myclient := fake.NewClientBuilder().WithObjects(objects...).Build() k := &KubeadmConfigReconciler{ - Client: myclient, + Client: myclient, + SecretCachingClient: myclient, } request := ctrl.Request{ @@ -362,7 +370,7 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnEarlyIfMachineHasNoCluster(t *t }, } _, err := k.Reconcile(ctx, request) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) } // This does not expect an error, hoping that the associated cluster will be created. 
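Another mechanical migration running through these tests: the deprecated k8s.io/utils/pointer helpers are replaced by the generic k8s.io/utils/ptr. A self-contained example of the new form:

```go
package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

func main() {
	// ptr.To infers the type, replacing pointer.String, pointer.Bool, etc.
	dataSecretName := ptr.To("something") // *string, was pointer.String("something")
	controller := ptr.To(true)            // *bool, was pointer.Bool(true)
	fmt.Println(*dataSecretName, *controller)
}
```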
@@ -386,7 +394,8 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnNilIfAssociatedClusterIsNotFoun myclient := fake.NewClientBuilder().WithObjects(objects...).Build() k := &KubeadmConfigReconciler{ - Client: myclient, + Client: myclient, + SecretCachingClient: myclient, } request := ctrl.Request{ @@ -396,7 +405,7 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnNilIfAssociatedClusterIsNotFoun }, } _, err := k.Reconcile(ctx, request) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) } // If the control plane isn't initialized then there is no cluster for either a worker or control plane node to join. @@ -450,15 +459,16 @@ func TestKubeadmConfigReconciler_Reconcile_RequeueJoiningNodesIfControlPlaneNotI t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - myclient := fake.NewClientBuilder().WithObjects(tc.objects...).Build() + myclient := fake.NewClientBuilder().WithObjects(tc.objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, - KubeadmInitLock: &myInitLocker{}, + Client: myclient, + SecretCachingClient: myclient, + KubeadmInitLock: &myInitLocker{}, } result, err := k.Reconcile(ctx, tc.request) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(30 * time.Second)) assertHasFalseCondition(g, myclient, tc.request, bootstrapv1.DataSecretAvailableCondition, clusterv1.ConditionSeverityInfo, clusterv1.WaitingForControlPlaneAvailableReason) @@ -492,12 +502,13 @@ func TestKubeadmConfigReconciler_Reconcile_GenerateCloudConfigData(t *testing.T) } objects = append(objects, createSecrets(t, cluster, controlPlaneInitConfig)...) - myclient := fake.NewClientBuilder().WithObjects(objects...).Build() + myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, - KubeadmInitLock: &myInitLocker{}, - remoteClientGetter: fakeremote.NewClusterClient, + Client: myclient, + SecretCachingClient: myclient, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), + KubeadmInitLock: &myInitLocker{}, } request := ctrl.Request{ @@ -510,12 +521,12 @@ func TestKubeadmConfigReconciler_Reconcile_GenerateCloudConfigData(t *testing.T) g.Expect(myclient.Get(ctx, client.ObjectKey{Namespace: metav1.NamespaceDefault, Name: configName}, s)).ToNot(Succeed()) result, err := k.Reconcile(ctx, request) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(time.Duration(0))) cfg, err := getKubeadmConfig(myclient, "control-plane-init-cfg", metav1.NamespaceDefault) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(cfg.Status.Ready).To(BeTrue()) g.Expect(cfg.Status.DataSecretName).NotTo(BeNil()) g.Expect(cfg.Status.ObservedGeneration).NotTo(BeNil()) @@ -527,7 +538,7 @@ func TestKubeadmConfigReconciler_Reconcile_GenerateCloudConfigData(t *testing.T) g.Expect(s.Data["value"]).ToNot(BeEmpty()) // Ensure that we don't fail trying to refresh any bootstrap tokens _, err = k.Reconcile(ctx, request) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) } // If a control plane has no JoinConfiguration, then we will create a default and no error will occur. 
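The hunks that follow replace the injectable remoteClientGetter (previously stubbed with fakeremote.NewClusterClient) with the ClusterCacheTracker the reconciler now uses to reach workload clusters. The test constructor, as used throughout this file, looks like this (argument roles inferred from the call sites below):

```go
// Sketch of the tracker setup repeated in these tests. The second client
// argument is what the tracker hands back as the "remote" (workload) cluster
// client; most tests reuse the management fake client for both roles.
tracker := remote.NewTestClusterCacheTracker(
	logr.New(log.NullLogSink{}), // discard controller logs in tests
	myclient,                    // management-cluster client
	myclient,                    // client served for the workload cluster
	myclient.Scheme(),
	client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace},
)
k := &KubeadmConfigReconciler{
	Client:              myclient,
	SecretCachingClient: myclient,
	Tracker:             tracker,
	KubeadmInitLock:     &myInitLocker{},
}
```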
@@ -553,12 +564,13 @@ func TestKubeadmConfigReconciler_Reconcile_ErrorIfJoiningControlPlaneHasInvalidC controlPlaneJoinConfig, } objects = append(objects, createSecrets(t, cluster, controlPlaneInitConfig)...) - myclient := fake.NewClientBuilder().WithObjects(objects...).Build() + myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, - KubeadmInitLock: &myInitLocker{}, - remoteClientGetter: fakeremote.NewClusterClient, + Client: myclient, + SecretCachingClient: myclient, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), + KubeadmInitLock: &myInitLocker{}, } request := ctrl.Request{ @@ -568,7 +580,7 @@ func TestKubeadmConfigReconciler_Reconcile_ErrorIfJoiningControlPlaneHasInvalidC }, } _, err := k.Reconcile(ctx, request) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) actualConfig := &bootstrapv1.KubeadmConfig{} g.Expect(myclient.Get(ctx, client.ObjectKey{Namespace: controlPlaneJoinConfig.Namespace, Name: controlPlaneJoinConfig.Name}, actualConfig)).To(Succeed()) assertHasTrueCondition(g, myclient, request, bootstrapv1.DataSecretAvailableCondition) @@ -597,11 +609,12 @@ func TestKubeadmConfigReconciler_Reconcile_RequeueIfControlPlaneIsMissingAPIEndp } objects = append(objects, createSecrets(t, cluster, controlPlaneInitConfig)...) - myclient := fake.NewClientBuilder().WithObjects(objects...).Build() + myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, - KubeadmInitLock: &myInitLocker{}, + Client: myclient, + SecretCachingClient: myclient, + KubeadmInitLock: &myInitLocker{}, } request := ctrl.Request{ @@ -611,7 +624,7 @@ func TestKubeadmConfigReconciler_Reconcile_RequeueIfControlPlaneIsMissingAPIEndp }, } result, err := k.Reconcile(ctx, request) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(10 * time.Second)) @@ -675,11 +688,12 @@ func TestReconcileIfJoinCertificatesAvailableConditioninNodesAndControlPlaneIsRe config, } objects = append(objects, createSecrets(t, cluster, config)...) 
- myclient := fake.NewClientBuilder().WithObjects(objects...).Build() + myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, - KubeadmInitLock: &myInitLocker{}, - remoteClientGetter: fakeremote.NewClusterClient, + Client: myclient, + SecretCachingClient: myclient, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), + KubeadmInitLock: &myInitLocker{}, } request := ctrl.Request{ @@ -689,12 +703,12 @@ func TestReconcileIfJoinCertificatesAvailableConditioninNodesAndControlPlaneIsRe }, } result, err := k.Reconcile(ctx, request) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(time.Duration(0))) cfg, err := getKubeadmConfig(myclient, rt.configName, metav1.NamespaceDefault) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(cfg.Status.Ready).To(BeTrue()) g.Expect(cfg.Status.DataSecretName).NotTo(BeNil()) g.Expect(cfg.Status.ObservedGeneration).NotTo(BeNil()) @@ -702,7 +716,7 @@ func TestReconcileIfJoinCertificatesAvailableConditioninNodesAndControlPlaneIsRe l := &corev1.SecretList{} err = myclient.List(ctx, l, client.ListOption(client.InNamespace(metav1.NamespaceSystem))) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(l.Items).To(HaveLen(1)) }) } @@ -726,7 +740,7 @@ func TestReconcileIfJoinNodePoolsAndControlPlaneIsReady(t *testing.T) { name: "Join a worker node with a fully compiled kubeadm config object", machinePool: newWorkerMachinePoolForCluster(cluster), configName: "workerpool-join-cfg", - configBuilder: func(namespace, name string) *bootstrapv1.KubeadmConfig { + configBuilder: func(namespace, _ string) *bootstrapv1.KubeadmConfig { return newWorkerJoinKubeadmConfig(namespace, "workerpool-join-cfg") }, }, @@ -752,11 +766,12 @@ func TestReconcileIfJoinNodePoolsAndControlPlaneIsReady(t *testing.T) { config, } objects = append(objects, createSecrets(t, cluster, config)...) 
- myclient := fake.NewClientBuilder().WithObjects(objects...).Build() + myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, - KubeadmInitLock: &myInitLocker{}, - remoteClientGetter: fakeremote.NewClusterClient, + Client: myclient, + SecretCachingClient: myclient, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), + KubeadmInitLock: &myInitLocker{}, } request := ctrl.Request{ @@ -766,19 +781,19 @@ func TestReconcileIfJoinNodePoolsAndControlPlaneIsReady(t *testing.T) { }, } result, err := k.Reconcile(ctx, request) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(time.Duration(0))) cfg, err := getKubeadmConfig(myclient, rt.configName, metav1.NamespaceDefault) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(cfg.Status.Ready).To(BeTrue()) g.Expect(cfg.Status.DataSecretName).NotTo(BeNil()) g.Expect(cfg.Status.ObservedGeneration).NotTo(BeNil()) l := &corev1.SecretList{} err = myclient.List(ctx, l, client.ListOption(client.InNamespace(metav1.NamespaceSystem))) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(l.Items).To(HaveLen(1)) }) } @@ -851,12 +866,13 @@ func TestBootstrapDataFormat(t *testing.T) { } objects = append(objects, createSecrets(t, cluster, config)...) - myclient := fake.NewClientBuilder().WithObjects(objects...).Build() + myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, - KubeadmInitLock: &myInitLocker{}, - remoteClientGetter: fakeremote.NewClusterClient, + Client: myclient, + SecretCachingClient: myclient, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), + KubeadmInitLock: &myInitLocker{}, } request := ctrl.Request{ NamespacedName: client.ObjectKey{ @@ -867,11 +883,11 @@ func TestBootstrapDataFormat(t *testing.T) { // Reconcile the KubeadmConfig resource. _, err := k.Reconcile(ctx, request) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) // Verify the KubeadmConfig resource state is correct. cfg, err := getKubeadmConfig(myclient, configName, metav1.NamespaceDefault) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(cfg.Status.Ready).To(BeTrue()) g.Expect(cfg.Status.DataSecretName).NotTo(BeNil()) @@ -883,7 +899,7 @@ func TestBootstrapDataFormat(t *testing.T) { } secret := &corev1.Secret{} err = myclient.Get(ctx, key, secret) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) // Verify the format field of the bootstrap data secret is correct. g.Expect(string(secret.Data["format"])).To(Equal(string(tc.format))) @@ -896,11 +912,11 @@ func TestBootstrapDataFormat(t *testing.T) { // TODO: Verify the YAML document is valid cloud-config? var out interface{} err = yaml.Unmarshal(data, &out) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) case bootstrapv1.Ignition: // Verify the bootstrap data is valid Ignition. 
_, reports, err := ignition.Parse(data) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(reports.IsFatal()).NotTo(BeTrue()) } }) @@ -932,11 +948,12 @@ func TestKubeadmConfigSecretCreatedStatusNotPatched(t *testing.T) { } objects = append(objects, createSecrets(t, cluster, initConfig)...) - myclient := fake.NewClientBuilder().WithObjects(objects...).Build() + myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, - KubeadmInitLock: &myInitLocker{}, - remoteClientGetter: fakeremote.NewClusterClient, + Client: myclient, + SecretCachingClient: myclient, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), + KubeadmInitLock: &myInitLocker{}, } request := ctrl.Request{ NamespacedName: client.ObjectKey{ @@ -957,7 +974,7 @@ func TestKubeadmConfigSecretCreatedStatusNotPatched(t *testing.T) { Kind: "KubeadmConfig", Name: workerJoinConfig.Name, UID: workerJoinConfig.UID, - Controller: pointer.Bool(true), + Controller: ptr.To(true), }, }, }, @@ -970,12 +987,12 @@ func TestKubeadmConfigSecretCreatedStatusNotPatched(t *testing.T) { err := myclient.Create(ctx, secret) g.Expect(err).ToNot(HaveOccurred()) result, err := k.Reconcile(ctx, request) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(time.Duration(0))) cfg, err := getKubeadmConfig(myclient, "worker-join-cfg", metav1.NamespaceDefault) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(cfg.Status.Ready).To(BeTrue()) g.Expect(cfg.Status.DataSecretName).NotTo(BeNil()) g.Expect(cfg.Status.ObservedGeneration).NotTo(BeNil()) @@ -1009,12 +1026,14 @@ func TestBootstrapTokenTTLExtension(t *testing.T) { } objects = append(objects, createSecrets(t, cluster, initConfig)...) 
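The token-TTL tests below now split state across two fake clients. The rationale is an inference, not stated in the diff: bootstrap token secrets live in kube-system of the workload cluster, so giving the tracker a dedicated fake client lets the tests assert on token secrets separately from the management-cluster objects:

```go
// Sketch of the two-client split used in the TTL and rotation tests below.
myclient := fake.NewClientBuilder().
	WithObjects(objects...).
	WithStatusSubresource(&bootstrapv1.KubeadmConfig{}, &clusterv1.Machine{}).
	Build() // management cluster: KubeadmConfigs, Machines, CA secrets
remoteClient := fake.NewClientBuilder().Build() // workload cluster: token secrets
```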
- myclient := fake.NewClientBuilder().WithObjects(objects...).Build() + myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}, &clusterv1.Machine{}).Build() + remoteClient := fake.NewClientBuilder().Build() k := &KubeadmConfigReconciler{ - Client: myclient, - KubeadmInitLock: &myInitLocker{}, - TokenTTL: DefaultTokenTTL, - remoteClientGetter: fakeremote.NewClusterClient, + Client: myclient, + SecretCachingClient: myclient, + KubeadmInitLock: &myInitLocker{}, + TokenTTL: DefaultTokenTTL, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, remoteClient, remoteClient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), } request := ctrl.Request{ NamespacedName: client.ObjectKey{ @@ -1023,12 +1042,11 @@ func TestBootstrapTokenTTLExtension(t *testing.T) { }, } result, err := k.Reconcile(ctx, request) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(result.Requeue).To(BeFalse()) - g.Expect(result.RequeueAfter).To(Equal(time.Duration(0))) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(result.RequeueAfter).To(Equal(k.TokenTTL / 3)) cfg, err := getKubeadmConfig(myclient, "worker-join-cfg", metav1.NamespaceDefault) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(cfg.Status.Ready).To(BeTrue()) g.Expect(cfg.Status.DataSecretName).NotTo(BeNil()) g.Expect(cfg.Status.ObservedGeneration).NotTo(BeNil()) @@ -1040,29 +1058,62 @@ func TestBootstrapTokenTTLExtension(t *testing.T) { }, } result, err = k.Reconcile(ctx, request) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(result.Requeue).To(BeFalse()) - g.Expect(result.RequeueAfter).To(Equal(time.Duration(0))) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(result.RequeueAfter).To(Equal(k.TokenTTL / 3)) cfg, err = getKubeadmConfig(myclient, "control-plane-join-cfg", metav1.NamespaceDefault) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(cfg.Status.Ready).To(BeTrue()) g.Expect(cfg.Status.DataSecretName).NotTo(BeNil()) g.Expect(cfg.Status.ObservedGeneration).NotTo(BeNil()) l := &corev1.SecretList{} - err = myclient.List(ctx, l, client.ListOption(client.InNamespace(metav1.NamespaceSystem))) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(l.Items).To(HaveLen(2)) + g.Expect(remoteClient.List(ctx, l, client.ListOption(client.InNamespace(metav1.NamespaceSystem)))).To(Succeed()) + g.Expect(l.Items).To(HaveLen(2)) // control plane vs. worker - // ensure that the token is refreshed... 
+ t.Log("Ensure that the token secret is not updated while it's still fresh") tokenExpires := make([][]byte, len(l.Items)) for i, item := range l.Items { tokenExpires[i] = item.Data[bootstrapapi.BootstrapTokenExpirationKey] } - <-time.After(1 * time.Second) + for _, req := range []ctrl.Request{ + { + NamespacedName: client.ObjectKey{ + Namespace: metav1.NamespaceDefault, + Name: "worker-join-cfg", + }, + }, + { + NamespacedName: client.ObjectKey{ + Namespace: metav1.NamespaceDefault, + Name: "control-plane-join-cfg", + }, + }, + } { + result, err := k.Reconcile(ctx, req) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(result.RequeueAfter).To(Equal(k.TokenTTL / 3)) + } + + l = &corev1.SecretList{} + g.Expect(remoteClient.List(ctx, l, client.ListOption(client.InNamespace(metav1.NamespaceSystem)))).To(Succeed()) + g.Expect(l.Items).To(HaveLen(2)) + + for i, item := range l.Items { + // No refresh should have happened since no time passed and the token is therefore still fresh + g.Expect(bytes.Equal(tokenExpires[i], item.Data[bootstrapapi.BootstrapTokenExpirationKey])).To(BeTrue()) + } + + t.Log("Ensure that the token secret is updated if expiration time is soon") + + for i, item := range l.Items { + // Simulate that expiry time is only TTL/2 from now. This should trigger a refresh. + item.Data[bootstrapapi.BootstrapTokenExpirationKey] = []byte(time.Now().UTC().Add(k.TokenTTL / 2).Format(time.RFC3339)) + g.Expect(remoteClient.Update(ctx, &l.Items[i])).To(Succeed()) + tokenExpires[i] = item.Data[bootstrapapi.BootstrapTokenExpirationKey] + } for _, req := range []ctrl.Request{ { @@ -1079,21 +1130,29 @@ func TestBootstrapTokenTTLExtension(t *testing.T) { }, } { result, err := k.Reconcile(ctx, req) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(result.RequeueAfter).NotTo(BeNumerically(">=", k.TokenTTL)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(result.RequeueAfter).To(Equal(k.TokenTTL / 3)) } l = &corev1.SecretList{} - err = myclient.List(ctx, l, client.ListOption(client.InNamespace(metav1.NamespaceSystem))) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(remoteClient.List(ctx, l, client.ListOption(client.InNamespace(metav1.NamespaceSystem)))).To(Succeed()) g.Expect(l.Items).To(HaveLen(2)) for i, item := range l.Items { + // Refresh should have happened since expiration is soon g.Expect(bytes.Equal(tokenExpires[i], item.Data[bootstrapapi.BootstrapTokenExpirationKey])).To(BeFalse()) tokenExpires[i] = item.Data[bootstrapapi.BootstrapTokenExpirationKey] } - // ...the infrastructure is marked "ready", but token should still be refreshed... + t.Log("If infrastructure is marked ready, the token should still be refreshed") + + for i, item := range l.Items { + // Simulate that expiry time is only TTL/2 from now. This should trigger a refresh. 
+ item.Data[bootstrapapi.BootstrapTokenExpirationKey] = []byte(time.Now().UTC().Add(k.TokenTTL / 2).Format(time.RFC3339)) + g.Expect(remoteClient.Update(ctx, &l.Items[i])).To(Succeed()) + tokenExpires[i] = item.Data[bootstrapapi.BootstrapTokenExpirationKey] + } + patchHelper, err := patch.NewHelper(workerMachine, myclient) g.Expect(err).ShouldNot(HaveOccurred()) workerMachine.Status.InfrastructureReady = true @@ -1104,8 +1163,6 @@ func TestBootstrapTokenTTLExtension(t *testing.T) { controlPlaneJoinMachine.Status.InfrastructureReady = true g.Expect(patchHelper.Patch(ctx, controlPlaneJoinMachine)).To(Succeed()) - <-time.After(1 * time.Second) - for _, req := range []ctrl.Request{ { NamespacedName: client.ObjectKey{ @@ -1121,21 +1178,29 @@ func TestBootstrapTokenTTLExtension(t *testing.T) { }, } { result, err := k.Reconcile(ctx, req) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(result.RequeueAfter).NotTo(BeNumerically(">=", k.TokenTTL)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(result.RequeueAfter).To(Equal(k.TokenTTL / 3)) } l = &corev1.SecretList{} - err = myclient.List(ctx, l, client.ListOption(client.InNamespace(metav1.NamespaceSystem))) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(remoteClient.List(ctx, l, client.ListOption(client.InNamespace(metav1.NamespaceSystem)))).To(Succeed()) g.Expect(l.Items).To(HaveLen(2)) for i, item := range l.Items { + // Refresh should have happened since expiration is soon, even if infrastructure is ready g.Expect(bytes.Equal(tokenExpires[i], item.Data[bootstrapapi.BootstrapTokenExpirationKey])).To(BeFalse()) tokenExpires[i] = item.Data[bootstrapapi.BootstrapTokenExpirationKey] } - // ...until the Nodes have actually joined the cluster and we get a nodeRef + t.Log("When the Nodes have actually joined the cluster and we get a nodeRef, no more refresh should happen") + + for i, item := range l.Items { + // Simulate that expiry time is only TTL/2 from now. This would normally trigger a refresh. + item.Data[bootstrapapi.BootstrapTokenExpirationKey] = []byte(time.Now().UTC().Add(k.TokenTTL / 2).Format(time.RFC3339)) + g.Expect(remoteClient.Update(ctx, &l.Items[i])).To(Succeed()) + tokenExpires[i] = item.Data[bootstrapapi.BootstrapTokenExpirationKey] + } + patchHelper, err = patch.NewHelper(workerMachine, myclient) g.Expect(err).ShouldNot(HaveOccurred()) workerMachine.Status.NodeRef = &corev1.ObjectReference{ @@ -1154,8 +1219,6 @@ func TestBootstrapTokenTTLExtension(t *testing.T) { } g.Expect(patchHelper.Patch(ctx, controlPlaneJoinMachine)).To(Succeed()) - <-time.After(1 * time.Second) - for _, req := range []ctrl.Request{ { NamespacedName: client.ObjectKey{ @@ -1171,14 +1234,13 @@ func TestBootstrapTokenTTLExtension(t *testing.T) { }, } { result, err := k.Reconcile(ctx, req) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(time.Duration(0))) } l = &corev1.SecretList{} - err = myclient.List(ctx, l, client.ListOption(client.InNamespace(metav1.NamespaceSystem))) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(remoteClient.List(ctx, l, client.ListOption(client.InNamespace(metav1.NamespaceSystem)))).To(Succeed()) g.Expect(l.Items).To(HaveLen(2)) for i, item := range l.Items { @@ -1210,12 +1272,14 @@ func TestBootstrapTokenRotationMachinePool(t *testing.T) { } objects = append(objects, createSecrets(t, cluster, initConfig)...) 
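On both sides of this point the tests stop sleeping (the old <-time.After(1 * time.Second) calls) and instead age tokens deterministically. A bootstrap token secret records its expiry as RFC3339 text, so a test can push a token toward expiry by rewriting that key: expiries still beyond the 5/6-TTL freshness window are left alone, closer expiries trigger a refresh, and (for MachinePools) anything under half the TTL is rotated outright:

```go
// Age a token by rewriting its expiration key in the workload cluster.
// TokenTTL/2 lands inside the refresh window used by the Machine TTL test.
expiry := time.Now().UTC().Add(k.TokenTTL / 2).Format(time.RFC3339)
secret.Data[bootstrapapi.BootstrapTokenExpirationKey] = []byte(expiry)
g.Expect(remoteClient.Update(ctx, secret)).To(Succeed())
```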
- myclient := fake.NewClientBuilder().WithObjects(objects...).Build() + myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}, &expv1.MachinePool{}).Build() + remoteClient := fake.NewClientBuilder().Build() k := &KubeadmConfigReconciler{ - Client: myclient, - KubeadmInitLock: &myInitLocker{}, - TokenTTL: DefaultTokenTTL, - remoteClientGetter: fakeremote.NewClusterClient, + Client: myclient, + SecretCachingClient: myclient, + KubeadmInitLock: &myInitLocker{}, + TokenTTL: DefaultTokenTTL, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, remoteClient, remoteClient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), } request := ctrl.Request{ NamespacedName: client.ObjectKey{ @@ -1224,82 +1288,99 @@ func TestBootstrapTokenRotationMachinePool(t *testing.T) { }, } result, err := k.Reconcile(ctx, request) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(result.Requeue).To(BeFalse()) - g.Expect(result.RequeueAfter).To(Equal(time.Duration(0))) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(result.RequeueAfter).To(Equal(k.TokenTTL / 3)) cfg, err := getKubeadmConfig(myclient, "workerpool-join-cfg", metav1.NamespaceDefault) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(cfg.Status.Ready).To(BeTrue()) g.Expect(cfg.Status.DataSecretName).NotTo(BeNil()) g.Expect(cfg.Status.ObservedGeneration).NotTo(BeNil()) l := &corev1.SecretList{} - err = myclient.List(ctx, l, client.ListOption(client.InNamespace(metav1.NamespaceSystem))) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(remoteClient.List(ctx, l, client.ListOption(client.InNamespace(metav1.NamespaceSystem)))).To(Succeed()) g.Expect(l.Items).To(HaveLen(1)) - // ensure that the token is refreshed... + t.Log("Ensure that the token secret is not updated while it's still fresh") tokenExpires := make([][]byte, len(l.Items)) for i, item := range l.Items { tokenExpires[i] = item.Data[bootstrapapi.BootstrapTokenExpirationKey] } - <-time.After(1 * time.Second) + result, err = k.Reconcile(ctx, request) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(result.RequeueAfter).To(Equal(k.TokenTTL / 3)) - for _, req := range []ctrl.Request{ - { - NamespacedName: client.ObjectKey{ - Namespace: metav1.NamespaceDefault, - Name: "workerpool-join-cfg", - }, - }, - } { - result, err := k.Reconcile(ctx, req) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(result.RequeueAfter).NotTo(BeNumerically(">=", k.TokenTTL)) + l = &corev1.SecretList{} + g.Expect(remoteClient.List(ctx, l, client.ListOption(client.InNamespace(metav1.NamespaceSystem)))).To(Succeed()) + g.Expect(l.Items).To(HaveLen(1)) + + for i, item := range l.Items { + // No refresh should have happened since no time passed and the token is therefore still fresh + g.Expect(bytes.Equal(tokenExpires[i], item.Data[bootstrapapi.BootstrapTokenExpirationKey])).To(BeTrue()) } + t.Log("Ensure that the token secret is updated if expiration time is soon") + + for i, item := range l.Items { + // Simulate that expiry time is only TTL*3/4 from now. This should trigger a refresh. 
+ item.Data[bootstrapapi.BootstrapTokenExpirationKey] = []byte(time.Now().UTC().Add(k.TokenTTL * 3 / 4).Format(time.RFC3339)) + g.Expect(remoteClient.Update(ctx, &l.Items[i])).To(Succeed()) + tokenExpires[i] = item.Data[bootstrapapi.BootstrapTokenExpirationKey] + } + + result, err = k.Reconcile(ctx, request) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(result.RequeueAfter).To(Equal(k.TokenTTL / 3)) + l = &corev1.SecretList{} - err = myclient.List(ctx, l, client.ListOption(client.InNamespace(metav1.NamespaceSystem))) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(remoteClient.List(ctx, l, client.ListOption(client.InNamespace(metav1.NamespaceSystem)))).To(Succeed()) g.Expect(l.Items).To(HaveLen(1)) for i, item := range l.Items { + // Refresh should have happened since expiration is soon g.Expect(bytes.Equal(tokenExpires[i], item.Data[bootstrapapi.BootstrapTokenExpirationKey])).To(BeFalse()) tokenExpires[i] = item.Data[bootstrapapi.BootstrapTokenExpirationKey] } - // ...the infrastructure is marked "ready", but token should still be refreshed... + t.Log("If infrastructure is marked ready, the token should still be refreshed") + + for i, item := range l.Items { + // Simulate that expiry time is only TTL*3/4 from now. This should trigger a refresh. + item.Data[bootstrapapi.BootstrapTokenExpirationKey] = []byte(time.Now().UTC().Add(k.TokenTTL * 3 / 4).Format(time.RFC3339)) + g.Expect(remoteClient.Update(ctx, &l.Items[i])).To(Succeed()) + tokenExpires[i] = item.Data[bootstrapapi.BootstrapTokenExpirationKey] + } + patchHelper, err := patch.NewHelper(workerMachinePool, myclient) g.Expect(err).ShouldNot(HaveOccurred()) workerMachinePool.Status.InfrastructureReady = true g.Expect(patchHelper.Patch(ctx, workerMachinePool, patch.WithStatusObservedGeneration{})).To(Succeed()) - <-time.After(1 * time.Second) - - request = ctrl.Request{ - NamespacedName: client.ObjectKey{ - Namespace: metav1.NamespaceDefault, - Name: "workerpool-join-cfg", - }, - } result, err = k.Reconcile(ctx, request) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(result.RequeueAfter).NotTo(BeNumerically(">=", k.TokenTTL)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(result.RequeueAfter).To(Equal(k.TokenTTL / 3)) l = &corev1.SecretList{} - err = myclient.List(ctx, l, client.ListOption(client.InNamespace(metav1.NamespaceSystem))) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(remoteClient.List(ctx, l, client.ListOption(client.InNamespace(metav1.NamespaceSystem)))).To(Succeed()) g.Expect(l.Items).To(HaveLen(1)) for i, item := range l.Items { + // Refresh should have happened since expiration is soon, even if infrastructure is ready g.Expect(bytes.Equal(tokenExpires[i], item.Data[bootstrapapi.BootstrapTokenExpirationKey])).To(BeFalse()) tokenExpires[i] = item.Data[bootstrapapi.BootstrapTokenExpirationKey] } - // ...until all nodes have joined + t.Log("When the Nodes have actually joined the cluster and we get a nodeRef, no more refresh should happen") + + for i, item := range l.Items { + // Simulate that expiry time is only TTL*3/4 from now. This would normally trigger a refresh. 
+ item.Data[bootstrapapi.BootstrapTokenExpirationKey] = []byte(time.Now().UTC().Add(k.TokenTTL * 3 / 4).Format(time.RFC3339)) + g.Expect(remoteClient.Update(ctx, &l.Items[i])).To(Succeed()) + tokenExpires[i] = item.Data[bootstrapapi.BootstrapTokenExpirationKey] + } + workerMachinePool.Status.NodeRefs = []corev1.ObjectReference{ { Kind: "Node", @@ -1309,32 +1390,26 @@ func TestBootstrapTokenRotationMachinePool(t *testing.T) { } g.Expect(patchHelper.Patch(ctx, workerMachinePool, patch.WithStatusObservedGeneration{})).To(Succeed()) - <-time.After(1 * time.Second) - - request = ctrl.Request{ - NamespacedName: client.ObjectKey{ - Namespace: metav1.NamespaceDefault, - Name: "workerpool-join-cfg", - }, - } result, err = k.Reconcile(ctx, request) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(result.RequeueAfter).To(Equal(k.TokenTTL / 3)) l = &corev1.SecretList{} - err = myclient.List(ctx, l, client.ListOption(client.InNamespace(metav1.NamespaceSystem))) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(remoteClient.List(ctx, l, client.ListOption(client.InNamespace(metav1.NamespaceSystem)))).To(Succeed()) g.Expect(l.Items).To(HaveLen(1)) for i, item := range l.Items { g.Expect(bytes.Equal(tokenExpires[i], item.Data[bootstrapapi.BootstrapTokenExpirationKey])).To(BeTrue()) } - // before token expires, it should rotate it - tokenExpires[0] = []byte(time.Now().UTC().Add(k.TokenTTL / 5).Format(time.RFC3339)) - l.Items[0].Data[bootstrapapi.BootstrapTokenExpirationKey] = tokenExpires[0] - err = myclient.Update(ctx, &l.Items[0]) - g.Expect(err).NotTo(HaveOccurred()) + t.Log("Token must be rotated before it expires") + + for i, item := range l.Items { + // Simulate that expiry time is only TTL*4/10 from now. This should trigger rotation. + item.Data[bootstrapapi.BootstrapTokenExpirationKey] = []byte(time.Now().UTC().Add(k.TokenTTL * 4 / 10).Format(time.RFC3339)) + g.Expect(remoteClient.Update(ctx, &l.Items[i])).To(Succeed()) + tokenExpires[i] = item.Data[bootstrapapi.BootstrapTokenExpirationKey] + } request = ctrl.Request{ NamespacedName: client.ObjectKey{ @@ -1343,13 +1418,12 @@ func TestBootstrapTokenRotationMachinePool(t *testing.T) { }, } result, err = k.Reconcile(ctx, request) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(result.RequeueAfter).To(Equal(time.Duration(0))) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(result.RequeueAfter).To(Equal(k.TokenTTL / 3)) l = &corev1.SecretList{} - err = myclient.List(ctx, l, client.ListOption(client.InNamespace(metav1.NamespaceSystem))) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(l.Items).To(HaveLen(2)) + g.Expect(remoteClient.List(ctx, l, client.ListOption(client.InNamespace(metav1.NamespaceSystem)))).To(Succeed()) + g.Expect(l.Items).To(HaveLen(2)) // old and new token foundOld := false foundNew := true for _, item := range l.Items { @@ -1357,7 +1431,7 @@ func TestBootstrapTokenRotationMachinePool(t *testing.T) { foundOld = true } else { expirationTime, err := time.Parse(time.RFC3339, string(item.Data[bootstrapapi.BootstrapTokenExpirationKey])) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(expirationTime).Should(BeTemporally("~", time.Now().UTC().Add(k.TokenTTL), 10*time.Second)) foundNew = true } @@ -1368,12 +1442,6 @@ func TestBootstrapTokenRotationMachinePool(t *testing.T) { // Ensure the discovery portion of the JoinConfiguration gets generated correctly. 
func TestKubeadmConfigReconciler_Reconcile_DiscoveryReconcileBehaviors(t *testing.T) { - k := &KubeadmConfigReconciler{ - Client: fake.NewClientBuilder().Build(), - KubeadmInitLock: &myInitLocker{}, - remoteClientGetter: fakeremote.NewClusterClient, - } - caHash := []string{"...."} bootstrapToken := bootstrapv1.Discovery{ BootstrapToken: &bootstrapv1.BootstrapTokenDiscovery{ @@ -1489,7 +1557,7 @@ func TestKubeadmConfigReconciler_Reconcile_DiscoveryReconcileBehaviors(t *testin }, validateDiscovery: func(g *WithT, c *bootstrapv1.KubeadmConfig) error { d := c.Spec.JoinConfiguration.Discovery - g.Expect(reflect.DeepEqual(d.BootstrapToken.CACertHashes, caHash)).To(BeTrue()) + g.Expect(d.BootstrapToken.CACertHashes).To(BeComparableTo(caHash)) return nil }, }, @@ -1499,12 +1567,20 @@ func TestKubeadmConfigReconciler_Reconcile_DiscoveryReconcileBehaviors(t *testin t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) + fakeClient := fake.NewClientBuilder().Build() + k := &KubeadmConfigReconciler{ + Client: fakeClient, + SecretCachingClient: fakeClient, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), fakeClient, fakeClient, fakeClient.Scheme(), client.ObjectKey{Name: tc.cluster.Name, Namespace: tc.cluster.Namespace}), + KubeadmInitLock: &myInitLocker{}, + } + res, err := k.reconcileDiscovery(ctx, tc.cluster, tc.config, secret.Certificates{}) g.Expect(res.IsZero()).To(BeTrue()) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) err = tc.validateDiscovery(g, tc.config) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) }) } } @@ -1544,9 +1620,9 @@ func TestKubeadmConfigReconciler_Reconcile_DiscoveryReconcileFailureBehaviors(t g := NewWithT(t) res, err := k.reconcileDiscovery(ctx, tc.cluster, tc.config, secret.Certificates{}) - g.Expect(res).To(Equal(tc.result)) + g.Expect(res).To(BeComparableTo(tc.result)) if tc.err == nil { - g.Expect(err).To(BeNil()) + g.Expect(err).ToNot(HaveOccurred()) } else { g.Expect(err).To(Equal(tc.err)) } @@ -1595,7 +1671,7 @@ func TestKubeadmConfigReconciler_Reconcile_DynamicDefaultsForClusterConfiguratio }, machine: &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ - Version: pointer.String("otherVersion"), + Version: ptr.To("otherVersion"), }, }, }, @@ -1621,7 +1697,7 @@ func TestKubeadmConfigReconciler_Reconcile_DynamicDefaultsForClusterConfiguratio }, machine: &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ - Version: pointer.String("myversion"), + Version: ptr.To("myversion"), }, }, }, @@ -1710,24 +1786,25 @@ func TestKubeadmConfigReconciler_Reconcile_AlwaysCheckCAVerificationUnlessReques myclient := fake.NewClientBuilder().WithObjects(objects...).Build() reconciler := KubeadmConfigReconciler{ - Client: myclient, - KubeadmInitLock: &myInitLocker{}, - remoteClientGetter: fakeremote.NewClusterClient, + Client: myclient, + SecretCachingClient: myclient, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), + KubeadmInitLock: &myInitLocker{}, } wc := newWorkerJoinKubeadmConfig(metav1.NamespaceDefault, "worker-join-cfg") wc.Spec.JoinConfiguration.Discovery.BootstrapToken = tc.discovery key := client.ObjectKey{Namespace: wc.Namespace, Name: wc.Name} err := myclient.Create(ctx, wc) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) req := ctrl.Request{NamespacedName: key} _, err = reconciler.Reconcile(ctx, req) - g.Expect(err).NotTo(HaveOccurred()) + 
g.Expect(err).ToNot(HaveOccurred()) cfg := &bootstrapv1.KubeadmConfig{} err = myclient.Get(ctx, key, cfg) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(cfg.Spec.JoinConfiguration.Discovery.BootstrapToken.UnsafeSkipCAVerification).To(Equal(tc.skipCAVerification)) }) } @@ -1764,9 +1841,10 @@ func TestKubeadmConfigReconciler_ClusterToKubeadmConfigs(t *testing.T) { } fakeClient := fake.NewClientBuilder().WithObjects(objs...).Build() reconciler := &KubeadmConfigReconciler{ - Client: fakeClient, + Client: fakeClient, + SecretCachingClient: fakeClient, } - configs := reconciler.ClusterToKubeadmConfigs(cluster) + configs := reconciler.ClusterToKubeadmConfigs(ctx, cluster) names := make([]string, 6) for i := range configs { names[i] = configs[i].Name @@ -1803,14 +1881,15 @@ func TestKubeadmConfigReconciler_Reconcile_DoesNotFailIfCASecretsAlreadyExist(t } fakec := fake.NewClientBuilder().WithObjects(cluster, m, c, scrt).Build() reconciler := &KubeadmConfigReconciler{ - Client: fakec, - KubeadmInitLock: &myInitLocker{}, + Client: fakec, + SecretCachingClient: fakec, + KubeadmInitLock: &myInitLocker{}, } req := ctrl.Request{ NamespacedName: client.ObjectKey{Namespace: metav1.NamespaceDefault, Name: configName}, } _, err := reconciler.Reconcile(ctx, req) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) } // Exactly one control plane machine initializes if there are multiple control plane machines defined. @@ -1835,10 +1914,11 @@ func TestKubeadmConfigReconciler_Reconcile_ExactlyOneControlPlaneMachineInitiali controlPlaneInitMachineSecond, controlPlaneInitConfigSecond, } - myclient := fake.NewClientBuilder().WithObjects(objects...).Build() + myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, - KubeadmInitLock: &myInitLocker{}, + Client: myclient, + SecretCachingClient: myclient, + KubeadmInitLock: &myInitLocker{}, } request := ctrl.Request{ @@ -1848,7 +1928,7 @@ func TestKubeadmConfigReconciler_Reconcile_ExactlyOneControlPlaneMachineInitiali }, } result, err := k.Reconcile(ctx, request) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(time.Duration(0))) @@ -1859,7 +1939,7 @@ func TestKubeadmConfigReconciler_Reconcile_ExactlyOneControlPlaneMachineInitiali }, } result, err = k.Reconcile(ctx, request) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(30 * time.Second)) confList := &bootstrapv1.KubeadmConfigList{} @@ -1901,10 +1981,11 @@ func TestKubeadmConfigReconciler_Reconcile_PatchWhenErrorOccurred(t *testing.T) objects = append(objects, s) } - myclient := fake.NewClientBuilder().WithObjects(objects...).Build() + myclient := fake.NewClientBuilder().WithObjects(objects...).WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).Build() k := &KubeadmConfigReconciler{ - Client: myclient, - KubeadmInitLock: &myInitLocker{}, + Client: myclient, + SecretCachingClient: myclient, + KubeadmInitLock: &myInitLocker{}, } request := ctrl.Request{ @@ -1920,7 +2001,7 @@ func TestKubeadmConfigReconciler_Reconcile_PatchWhenErrorOccurred(t *testing.T) g.Expect(result.RequeueAfter).To(Equal(time.Duration(0))) cfg, err := getKubeadmConfig(myclient, "control-plane-init-cfg", metav1.NamespaceDefault) - g.Expect(err).NotTo(HaveOccurred()) + 
g.Expect(err).ToNot(HaveOccurred()) // check if the kubeadm config has been patched g.Expect(cfg.Spec.InitConfiguration).ToNot(BeNil()) g.Expect(cfg.Status.ObservedGeneration).NotTo(BeNil()) @@ -2039,8 +2120,9 @@ func TestKubeadmConfigReconciler_ResolveFiles(t *testing.T) { myclient := fake.NewClientBuilder().WithObjects(tc.objects...).Build() k := &KubeadmConfigReconciler{ - Client: myclient, - KubeadmInitLock: &myInitLocker{}, + Client: myclient, + SecretCachingClient: myclient, + KubeadmInitLock: &myInitLocker{}, } // make a list of files we expect to be sourced from secrets @@ -2055,8 +2137,8 @@ func TestKubeadmConfigReconciler_ResolveFiles(t *testing.T) { } files, err := k.resolveFiles(ctx, tc.cfg) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(files).To(Equal(tc.expect)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(files).To(BeComparableTo(tc.expect)) for _, file := range tc.cfg.Spec.Files { if contentFrom[file.Path] { g.Expect(file.ContentFrom).NotTo(BeNil()) @@ -2165,8 +2247,9 @@ func TestKubeadmConfigReconciler_ResolveUsers(t *testing.T) { myclient := fake.NewClientBuilder().WithObjects(tc.objects...).Build() k := &KubeadmConfigReconciler{ - Client: myclient, - KubeadmInitLock: &myInitLocker{}, + Client: myclient, + SecretCachingClient: myclient, + KubeadmInitLock: &myInitLocker{}, } // make a list of password we expect to be sourced from secrets @@ -2181,8 +2264,8 @@ func TestKubeadmConfigReconciler_ResolveUsers(t *testing.T) { } users, err := k.resolveUsers(ctx, tc.cfg) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(users).To(Equal(tc.expect)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(users).To(BeComparableTo(tc.expect)) for _, user := range tc.cfg.Spec.Users { if passwdFrom[user.Name] { g.Expect(user.PasswdFrom).NotTo(BeNil()) @@ -2220,7 +2303,7 @@ func newMachinePool(cluster *clusterv1.Cluster, name string) *expv1.MachinePool m := builder.MachinePool(cluster.Namespace, name). WithClusterName(cluster.Name). WithLabels(map[string]string{clusterv1.ClusterNameLabel: cluster.Name}). - WithBootstrapTemplate(bootstrapbuilder.KubeadmConfig(cluster.Namespace, "conf1").Unstructured()). + WithBootstrap(bootstrapbuilder.KubeadmConfig(cluster.Namespace, "conf1").Unstructured()). WithVersion("1.19.1"). Build() return m diff --git a/bootstrap/kubeadm/internal/controllers/suite_test.go b/bootstrap/kubeadm/internal/controllers/suite_test.go index 865f39ef5fcc..dce30d9334f7 100644 --- a/bootstrap/kubeadm/internal/controllers/suite_test.go +++ b/bootstrap/kubeadm/internal/controllers/suite_test.go @@ -17,22 +17,45 @@ limitations under the License. 
package controllers import ( + "context" + "fmt" "os" "testing" + corev1 "k8s.io/api/core/v1" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/cluster-api/internal/test/envtest" ) var ( - env *envtest.Environment - ctx = ctrl.SetupSignalHandler() + env *envtest.Environment + ctx = ctrl.SetupSignalHandler() + secretCachingClient client.Client ) func TestMain(m *testing.M) { + setupReconcilers := func(_ context.Context, mgr ctrl.Manager) { + var err error + secretCachingClient, err = client.New(mgr.GetConfig(), client.Options{ + HTTPClient: mgr.GetHTTPClient(), + Cache: &client.CacheOptions{ + Reader: mgr.GetCache(), + }, + }) + if err != nil { + panic(fmt.Sprintf("unable to create secretCachingClient: %v", err)) + } + } + os.Exit(envtest.Run(ctx, envtest.RunInput{ - M: m, - SetupEnv: func(e *envtest.Environment) { env = e }, + M: m, + ManagerUncachedObjs: []client.Object{ + &corev1.ConfigMap{}, + &corev1.Secret{}, + }, + SetupEnv: func(e *envtest.Environment) { env = e }, + SetupReconcilers: setupReconcilers, })) } diff --git a/bootstrap/kubeadm/internal/controllers/token.go b/bootstrap/kubeadm/internal/controllers/token.go index 7fc2be7586a3..c138b4a6cdd3 100644 --- a/bootstrap/kubeadm/internal/controllers/token.go +++ b/bootstrap/kubeadm/internal/controllers/token.go @@ -87,17 +87,6 @@ func getToken(ctx context.Context, c client.Client, token string) (*corev1.Secre return secret, nil } -// refreshToken extends the TTL for an existing token. -func refreshToken(ctx context.Context, c client.Client, token string, ttl time.Duration) error { - secret, err := getToken(ctx, c, token) - if err != nil { - return err - } - secret.Data[bootstrapapi.BootstrapTokenExpirationKey] = []byte(time.Now().UTC().Add(ttl).Format(time.RFC3339)) - - return c.Update(ctx, secret) -} - // shouldRotate returns true if an existing token is past half of its TTL and should be rotated. func shouldRotate(ctx context.Context, c client.Client, token string, ttl time.Duration) (bool, error) { secret, err := getToken(ctx, c, token) diff --git a/bootstrap/kubeadm/internal/ignition/clc/clc.go b/bootstrap/kubeadm/internal/ignition/clc/clc.go index 4c4fb7a08391..b7cbaa1b43bd 100644 --- a/bootstrap/kubeadm/internal/ignition/clc/clc.go +++ b/bootstrap/kubeadm/internal/ignition/clc/clc.go @@ -96,6 +96,7 @@ systemd: Description=kubeadm # Run only once. After successful run, this file is moved to /tmp/. ConditionPathExists=/etc/kubeadm.yml + After=network.target [Service] # To not restart the unit when it exits, as it is expected.
Type=oneshot diff --git a/bootstrap/kubeadm/internal/ignition/clc/clc_test.go b/bootstrap/kubeadm/internal/ignition/clc/clc_test.go index e61a5bdb2b2a..23ae5c53d7f7 100644 --- a/bootstrap/kubeadm/internal/ignition/clc/clc_test.go +++ b/bootstrap/kubeadm/internal/ignition/clc/clc_test.go @@ -23,7 +23,7 @@ import ( ignition "github.com/flatcar/ignition/config/v2_3" "github.com/flatcar/ignition/config/v2_3/types" "github.com/google/go-cmp/cmp" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/internal/cloudinit" @@ -77,7 +77,7 @@ func TestRender(t *testing.T) { PostKubeadmCommands: postKubeadmCommands, KubeadmCommand: "kubeadm join", NTP: &bootstrapv1.NTP{ - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), Servers: []string{ "foo.bar", "baz", @@ -86,13 +86,13 @@ func TestRender(t *testing.T) { Users: []bootstrapv1.User{ { Name: "foo", - Gecos: pointer.String("Foo B. Bar"), - Groups: pointer.String("foo, bar"), - HomeDir: pointer.String("/home/foo"), - Shell: pointer.String("/bin/false"), - Passwd: pointer.String("$6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/"), - PrimaryGroup: pointer.String("foo"), - Sudo: pointer.String("ALL=(ALL) NOPASSWD:ALL"), + Gecos: ptr.To("Foo B. Bar"), + Groups: ptr.To("foo, bar"), + HomeDir: ptr.To("/home/foo"), + Shell: ptr.To("/bin/false"), + Passwd: ptr.To("$6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/"), + PrimaryGroup: ptr.To("foo"), + Sudo: ptr.To("ALL=(ALL) NOPASSWD:ALL"), SSHAuthorizedKeys: []string{ "foo", "bar", @@ -104,8 +104,8 @@ func TestRender(t *testing.T) { { Device: "/dev/disk/azure/scsi1/lun0", Layout: true, - Overwrite: pointer.Bool(true), - TableType: pointer.String("gpt"), + Overwrite: ptr.To(true), + TableType: ptr.To("gpt"), }, }, Filesystems: []bootstrapv1.Filesystem{ @@ -114,7 +114,7 @@ func TestRender(t *testing.T) { Filesystem: "ext4", Label: "test_disk", ExtraOpts: []string{"-F", "-E", "lazy_itable_init=1,lazy_journal_init=1"}, - Overwrite: pointer.Bool(true), + Overwrite: ptr.To(true), }, }, }, @@ -147,7 +147,7 @@ func TestRender(t *testing.T) { }, HomeDir: "/home/foo", Name: "foo", - PasswordHash: pointer.String("$6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/"), + PasswordHash: ptr.To("$6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/"), PrimaryGroup: "foo", SSHAuthorizedKeys: []types.SSHAuthorizedKey{ "foo", @@ -175,7 +175,7 @@ func TestRender(t *testing.T) { Contents: types.FileContents{ Source: "data:,foo%20ALL%3D(ALL)%20NOPASSWD%3AALL%0A", }, - Mode: pointer.Int(384), + Mode: ptr.To(384), }, }, { @@ -189,7 +189,7 @@ func TestRender(t *testing.T) { Contents: types.FileContents{ Source: "data:,foo%0A", }, - Mode: pointer.Int(384), + Mode: ptr.To(384), }, }, { @@ -201,7 +201,7 @@ func TestRender(t *testing.T) { Contents: types.FileContents{ Source: 
"data:,%23!%2Fbin%2Fbash%0Aset%20-e%0A%0Apre-command%0Aanother-pre-command%0Acat%20%3C%3CEOF%20%3E%20%2Fetc%2Fmodules-load.d%2Fcontainerd.conf%0Aoverlay%0Abr_netfilter%0AEOF%0A%0A%0Akubeadm%20join%0Amkdir%20-p%20%2Frun%2Fcluster-api%20%26%26%20echo%20success%20%3E%20%2Frun%2Fcluster-api%2Fbootstrap-success.complete%0Amv%20%2Fetc%2Fkubeadm.yml%20%2Ftmp%2F%0A%0Apost-kubeadm-command%0Aanother-post-kubeamd-command%0Acat%20%3C%3CEOF%20%3E%20%2Fetc%2Fmodules-load.d%2Fcontainerd.conf%0Aoverlay%0Abr_netfilter%0AEOF%0A", }, - Mode: pointer.Int(448), + Mode: ptr.To(448), }, }, { @@ -213,7 +213,7 @@ func TestRender(t *testing.T) { Contents: types.FileContents{ Source: "data:,---%0Afoo%0A", }, - Mode: pointer.Int(384), + Mode: ptr.To(384), }, }, { @@ -225,7 +225,7 @@ func TestRender(t *testing.T) { Contents: types.FileContents{ Source: "data:,%23%20Common%20pool%0Aserver%20foo.bar%0Aserver%20baz%0A%0A%23%20Warning%3A%20Using%20default%20NTP%20settings%20will%20leave%20your%20NTP%0A%23%20server%20accessible%20to%20all%20hosts%20on%20the%20Internet.%0A%0A%23%20If%20you%20want%20to%20deny%20all%20machines%20(including%20your%20own)%0A%23%20from%20accessing%20the%20NTP%20server%2C%20uncomment%3A%0A%23restrict%20default%20ignore%0A%0A%23%20Default%20configuration%3A%0A%23%20-%20Allow%20only%20time%20queries%2C%20at%20a%20limited%20rate%2C%20sending%20KoD%20when%20in%20excess.%0A%23%20-%20Allow%20all%20local%20queries%20(IPv4%2C%20IPv6)%0Arestrict%20default%20nomodify%20nopeer%20noquery%20notrap%20limited%20kod%0Arestrict%20127.0.0.1%0Arestrict%20%5B%3A%3A1%5D%0A", }, - Mode: pointer.Int(420), + Mode: ptr.To(420), }, }, }, @@ -234,7 +234,7 @@ func TestRender(t *testing.T) { Mount: &types.Mount{ Device: "/dev/disk/azure/scsi1/lun0", Format: "ext4", - Label: pointer.String("test_disk"), + Label: ptr.To("test_disk"), Options: []types.MountOption{ "-F", "-E", @@ -249,17 +249,17 @@ func TestRender(t *testing.T) { Systemd: types.Systemd{ Units: []types.Unit{ { - Contents: "[Unit]\nDescription=kubeadm\n# Run only once. After successful run, this file is moved to /tmp/.\nConditionPathExists=/etc/kubeadm.yml\n[Service]\n# To not restart the unit when it exits, as it is expected.\nType=oneshot\nExecStart=/etc/kubeadm.sh\n[Install]\nWantedBy=multi-user.target\n", - Enabled: pointer.Bool(true), + Contents: "[Unit]\nDescription=kubeadm\n# Run only once. 
After successful run, this file is moved to /tmp/.\nConditionPathExists=/etc/kubeadm.yml\nAfter=network.target\n[Service]\n# To not restart the unit when it exits, as it is expected.\nType=oneshot\nExecStart=/etc/kubeadm.sh\n[Install]\nWantedBy=multi-user.target\n", + Enabled: ptr.To(true), Name: "kubeadm.service", }, { - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), Name: "ntpd.service", }, { Contents: "[Unit]\nDescription = Mount test_disk\n\n[Mount]\nWhat=/dev/disk/azure/scsi1/lun0\nWhere=/var/lib/testdir\nOptions=foo\n\n[Install]\nWantedBy=multi-user.target\n", - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), Name: "var-lib-testdir.mount", }, }, @@ -275,11 +275,11 @@ func TestRender(t *testing.T) { Users: []bootstrapv1.User{ { Name: "foo", - LockPassword: pointer.Bool(false), + LockPassword: ptr.To(false), }, { Name: "bar", - LockPassword: pointer.Bool(false), + LockPassword: ptr.To(false), }, }, }, @@ -308,7 +308,7 @@ func TestRender(t *testing.T) { Contents: types.FileContents{ Source: "data:,%23%20Use%20most%20defaults%20for%20sshd%20configuration.%0ASubsystem%20sftp%20internal-sftp%0AClientAliveInterval%20180%0AUseDNS%20no%0AUsePAM%20yes%0APrintLastLog%20no%20%23%20handled%20by%20PAM%0APrintMotd%20no%20%23%20handled%20by%20PAM%0A%0AMatch%20User%20foo%2Cbar%0A%20%20PasswordAuthentication%20yes%0A", }, - Mode: pointer.Int(384), + Mode: ptr.To(384), }, }, { @@ -320,7 +320,7 @@ func TestRender(t *testing.T) { Contents: types.FileContents{ Source: "data:,%23!%2Fbin%2Fbash%0Aset%20-e%0A%0Apre-command%0Aanother-pre-command%0Acat%20%3C%3CEOF%20%3E%20%2Fetc%2Fmodules-load.d%2Fcontainerd.conf%0Aoverlay%0Abr_netfilter%0AEOF%0A%0A%0Akubeadm%20join%0Amkdir%20-p%20%2Frun%2Fcluster-api%20%26%26%20echo%20success%20%3E%20%2Frun%2Fcluster-api%2Fbootstrap-success.complete%0Amv%20%2Fetc%2Fkubeadm.yml%20%2Ftmp%2F%0A%0Apost-kubeadm-command%0Aanother-post-kubeamd-command%0Acat%20%3C%3CEOF%20%3E%20%2Fetc%2Fmodules-load.d%2Fcontainerd.conf%0Aoverlay%0Abr_netfilter%0AEOF%0A", }, - Mode: pointer.Int(448), + Mode: ptr.To(448), }, }, { @@ -332,7 +332,7 @@ func TestRender(t *testing.T) { Contents: types.FileContents{ Source: "data:,---%0Afoo%0A", }, - Mode: pointer.Int(384), + Mode: ptr.To(384), }, }, }, @@ -340,8 +340,8 @@ func TestRender(t *testing.T) { Systemd: types.Systemd{ Units: []types.Unit{ { - Contents: "[Unit]\nDescription=kubeadm\n# Run only once. After successful run, this file is moved to /tmp/.\nConditionPathExists=/etc/kubeadm.yml\n[Service]\n# To not restart the unit when it exits, as it is expected.\nType=oneshot\nExecStart=/etc/kubeadm.sh\n[Install]\nWantedBy=multi-user.target\n", - Enabled: pointer.Bool(true), + Contents: "[Unit]\nDescription=kubeadm\n# Run only once. 
After successful run, this file is moved to /tmp/.\nConditionPathExists=/etc/kubeadm.yml\nAfter=network.target\n[Service]\n# To not restart the unit when it exits, as it is expected.\nType=oneshot\nExecStart=/etc/kubeadm.sh\n[Install]\nWantedBy=multi-user.target\n", + Enabled: ptr.To(true), Name: "kubeadm.service", }, }, @@ -381,7 +381,7 @@ func TestRender(t *testing.T) { }, FileEmbedded1: types.FileEmbedded1{ Contents: types.FileContents{Source: "data:,foo%0A"}, - Mode: pointer.Int(384), + Mode: ptr.To(384), }, }, { @@ -391,7 +391,7 @@ func TestRender(t *testing.T) { }, FileEmbedded1: types.FileEmbedded1{ Contents: types.FileContents{Source: "data:,foo%0A"}, - Mode: pointer.Int(384), + Mode: ptr.To(384), }, }, { @@ -403,7 +403,7 @@ func TestRender(t *testing.T) { Contents: types.FileContents{ Source: "data:,%23!%2Fbin%2Fbash%0Aset%20-e%0A%0Apre-command%0Aanother-pre-command%0Acat%20%3C%3CEOF%20%3E%20%2Fetc%2Fmodules-load.d%2Fcontainerd.conf%0Aoverlay%0Abr_netfilter%0AEOF%0A%0A%0Akubeadm%20join%0Amkdir%20-p%20%2Frun%2Fcluster-api%20%26%26%20echo%20success%20%3E%20%2Frun%2Fcluster-api%2Fbootstrap-success.complete%0Amv%20%2Fetc%2Fkubeadm.yml%20%2Ftmp%2F%0A%0Apost-kubeadm-command%0Aanother-post-kubeamd-command%0Acat%20%3C%3CEOF%20%3E%20%2Fetc%2Fmodules-load.d%2Fcontainerd.conf%0Aoverlay%0Abr_netfilter%0AEOF%0A", }, - Mode: pointer.Int(448), + Mode: ptr.To(448), }, }, { @@ -415,7 +415,7 @@ func TestRender(t *testing.T) { Contents: types.FileContents{ Source: "data:,---%0Afoo%0A", }, - Mode: pointer.Int(384), + Mode: ptr.To(384), }, }, }, @@ -423,8 +423,8 @@ func TestRender(t *testing.T) { Systemd: types.Systemd{ Units: []types.Unit{ { - Contents: "[Unit]\nDescription=kubeadm\n# Run only once. After successful run, this file is moved to /tmp/.\nConditionPathExists=/etc/kubeadm.yml\n[Service]\n# To not restart the unit when it exits, as it is expected.\nType=oneshot\nExecStart=/etc/kubeadm.sh\n[Install]\nWantedBy=multi-user.target\n", - Enabled: pointer.Bool(true), + Contents: "[Unit]\nDescription=kubeadm\n# Run only once. 
After successful run, this file is moved to /tmp/.\nConditionPathExists=/etc/kubeadm.yml\nAfter=network.target\n[Service]\n# To not restart the unit when it exits, as it is expected.\nType=oneshot\nExecStart=/etc/kubeadm.sh\n[Install]\nWantedBy=multi-user.target\n", + Enabled: ptr.To(true), Name: "kubeadm.service", }, }, @@ -479,7 +479,7 @@ func TestRender(t *testing.T) { }, FileEmbedded1: types.FileEmbedded1{ Contents: types.FileContents{Source: "data:,"}, - Mode: pointer.Int(384), + Mode: ptr.To(384), }, }, { @@ -492,7 +492,7 @@ func TestRender(t *testing.T) { }, FileEmbedded1: types.FileEmbedded1{ Contents: types.FileContents{Source: "data:,"}, - Mode: pointer.Int(384), + Mode: ptr.To(384), }, }, { @@ -505,7 +505,7 @@ func TestRender(t *testing.T) { }, FileEmbedded1: types.FileEmbedded1{ Contents: types.FileContents{Source: "data:,"}, - Mode: pointer.Int(384), + Mode: ptr.To(384), }, }, { @@ -518,7 +518,7 @@ func TestRender(t *testing.T) { }, FileEmbedded1: types.FileEmbedded1{ Contents: types.FileContents{Source: "data:,"}, - Mode: pointer.Int(384), + Mode: ptr.To(384), }, }, { @@ -530,7 +530,7 @@ func TestRender(t *testing.T) { Contents: types.FileContents{ Source: "data:,%23!%2Fbin%2Fbash%0Aset%20-e%0A%0Apre-command%0Aanother-pre-command%0Acat%20%3C%3CEOF%20%3E%20%2Fetc%2Fmodules-load.d%2Fcontainerd.conf%0Aoverlay%0Abr_netfilter%0AEOF%0A%0A%0Akubeadm%20join%0Amkdir%20-p%20%2Frun%2Fcluster-api%20%26%26%20echo%20success%20%3E%20%2Frun%2Fcluster-api%2Fbootstrap-success.complete%0Amv%20%2Fetc%2Fkubeadm.yml%20%2Ftmp%2F%0A%0Apost-kubeadm-command%0Aanother-post-kubeamd-command%0Acat%20%3C%3CEOF%20%3E%20%2Fetc%2Fmodules-load.d%2Fcontainerd.conf%0Aoverlay%0Abr_netfilter%0AEOF%0A", }, - Mode: pointer.Int(448), + Mode: ptr.To(448), }, }, { @@ -542,7 +542,7 @@ func TestRender(t *testing.T) { Contents: types.FileContents{ Source: "data:,---%0Afoo%0A", }, - Mode: pointer.Int(384), + Mode: ptr.To(384), }, }, }, @@ -550,8 +550,8 @@ func TestRender(t *testing.T) { Systemd: types.Systemd{ Units: []types.Unit{ { - Contents: "[Unit]\nDescription=kubeadm\n# Run only once. After successful run, this file is moved to /tmp/.\nConditionPathExists=/etc/kubeadm.yml\n[Service]\n# To not restart the unit when it exits, as it is expected.\nType=oneshot\nExecStart=/etc/kubeadm.sh\n[Install]\nWantedBy=multi-user.target\n", - Enabled: pointer.Bool(true), + Contents: "[Unit]\nDescription=kubeadm\n# Run only once. 
After successful run, this file is moved to /tmp/.\nConditionPathExists=/etc/kubeadm.yml\nAfter=network.target\n[Service]\n# To not restart the unit when it exits, as it is expected.\nType=oneshot\nExecStart=/etc/kubeadm.sh\n[Install]\nWantedBy=multi-user.target\n", + Enabled: ptr.To(true), Name: "kubeadm.service", }, }, diff --git a/bootstrap/kubeadm/internal/locking/control_plane_init_mutex.go b/bootstrap/kubeadm/internal/locking/control_plane_init_mutex.go index ace07f149c34..ff249e4ccbd7 100644 --- a/bootstrap/kubeadm/internal/locking/control_plane_init_mutex.go +++ b/bootstrap/kubeadm/internal/locking/control_plane_init_mutex.go @@ -181,8 +181,8 @@ func (s *semaphore) setMetadata(cluster *clusterv1.Cluster) { }, OwnerReferences: []metav1.OwnerReference{ { - APIVersion: cluster.APIVersion, - Kind: cluster.Kind, + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Cluster", Name: cluster.Name, UID: cluster.UID, }, diff --git a/bootstrap/kubeadm/internal/locking/control_plane_init_mutex_test.go b/bootstrap/kubeadm/internal/locking/control_plane_init_mutex_test.go index 636afbfc934c..9d2feeff6e3a 100644 --- a/bootstrap/kubeadm/internal/locking/control_plane_init_mutex_test.go +++ b/bootstrap/kubeadm/internal/locking/control_plane_init_mutex_test.go @@ -176,7 +176,7 @@ func TestControlPlaneInitMutex_LockWithMachineDeletion(t *testing.T) { }, } for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { + t.Run(tc.name, func(*testing.T) { l := &ControlPlaneInitMutex{ client: tc.client, } @@ -203,7 +203,7 @@ func TestControlPlaneInitMutex_LockWithMachineDeletion(t *testing.T) { }, cm)).To(Succeed()) info, err := semaphore{cm}.information() - g.Expect(err).To(BeNil()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(info.MachineName).To(Equal(tc.expectedMachineName)) return nil diff --git a/bootstrap/kubeadm/internal/webhooks/doc.go b/bootstrap/kubeadm/internal/webhooks/doc.go new file mode 100644 index 000000000000..571963a3ae0e --- /dev/null +++ b/bootstrap/kubeadm/internal/webhooks/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package webhooks provides the validating webhooks for KubeadmConfig and KubeadmConfigTemplate. +package webhooks diff --git a/bootstrap/kubeadm/internal/webhooks/kubeadmconfig.go b/bootstrap/kubeadm/internal/webhooks/kubeadmconfig.go new file mode 100644 index 000000000000..55bf51754f0e --- /dev/null +++ b/bootstrap/kubeadm/internal/webhooks/kubeadmconfig.go @@ -0,0 +1,95 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhooks + +import ( + "context" + "fmt" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" +) + +func (webhook *KubeadmConfig) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(&bootstrapv1.KubeadmConfig{}). + WithDefaulter(webhook). + WithValidator(webhook). + Complete() +} + +// +kubebuilder:webhook:verbs=create;update,path=/mutate-bootstrap-cluster-x-k8s-io-v1beta1-kubeadmconfig,mutating=true,failurePolicy=fail,groups=bootstrap.cluster.x-k8s.io,resources=kubeadmconfigs,versions=v1beta1,name=default.kubeadmconfig.bootstrap.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 +// +kubebuilder:webhook:verbs=create;update,path=/validate-bootstrap-cluster-x-k8s-io-v1beta1-kubeadmconfig,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=bootstrap.cluster.x-k8s.io,resources=kubeadmconfigs,versions=v1beta1,name=validation.kubeadmconfig.bootstrap.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 + +// KubeadmConfig implements a validation and defaulting webhook for KubeadmConfig. +type KubeadmConfig struct{} + +var _ webhook.CustomValidator = &KubeadmConfig{} +var _ webhook.CustomDefaulter = &KubeadmConfig{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type. +func (webhook *KubeadmConfig) Default(_ context.Context, obj runtime.Object) error { + c, ok := obj.(*bootstrapv1.KubeadmConfig) + if !ok { + return apierrors.NewBadRequest(fmt.Sprintf("expected a KubeadmConfig but got a %T", obj)) + } + + c.Spec.Default() + + return nil +} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type. +func (webhook *KubeadmConfig) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) { + c, ok := obj.(*bootstrapv1.KubeadmConfig) + if !ok { + return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a KubeadmConfig but got a %T", obj)) + } + + return nil, webhook.validate(c.Spec, c.Name) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. +func (webhook *KubeadmConfig) ValidateUpdate(_ context.Context, _, newObj runtime.Object) (admission.Warnings, error) { + newC, ok := newObj.(*bootstrapv1.KubeadmConfig) + if !ok { + return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a KubeadmConfig but got a %T", newObj)) + } + + return nil, webhook.validate(newC.Spec, newC.Name) +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type. 
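+// There is nothing to check when a KubeadmConfig is deleted, so the request is always admitted.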
+func (webhook *KubeadmConfig) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) { + return nil, nil +} + +func (webhook *KubeadmConfig) validate(c bootstrapv1.KubeadmConfigSpec, name string) error { + allErrs := c.Validate(field.NewPath("spec")) + + if len(allErrs) == 0 { + return nil + } + + return apierrors.NewInvalid(bootstrapv1.GroupVersion.WithKind("KubeadmConfig").GroupKind(), name, allErrs) +} diff --git a/bootstrap/kubeadm/api/v1beta1/kubeadmconfig_webhook_test.go b/bootstrap/kubeadm/internal/webhooks/kubeadmconfig_test.go similarity index 55% rename from bootstrap/kubeadm/api/v1beta1/kubeadmconfig_webhook_test.go rename to bootstrap/kubeadm/internal/webhooks/kubeadmconfig_test.go index cd01a2940577..bdb6fa0eee75 100644 --- a/bootstrap/kubeadm/api/v1beta1/kubeadmconfig_webhook_test.go +++ b/bootstrap/kubeadm/internal/webhooks/kubeadmconfig_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +package webhooks import ( "testing" @@ -22,57 +22,62 @@ import ( . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilfeature "k8s.io/component-base/featuregate/testing" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/feature" - utildefaulting "sigs.k8s.io/cluster-api/util/defaulting" + "sigs.k8s.io/cluster-api/internal/webhooks/util" ) +var ctx = ctrl.SetupSignalHandler() + func TestKubeadmConfigDefault(t *testing.T) { defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)() g := NewWithT(t) - kubeadmConfig := &KubeadmConfig{ + kubeadmConfig := &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ Namespace: "foo", }, - Spec: KubeadmConfigSpec{}, + Spec: bootstrapv1.KubeadmConfigSpec{}, } updateDefaultingKubeadmConfig := kubeadmConfig.DeepCopy() - updateDefaultingKubeadmConfig.Spec.Verbosity = pointer.Int32(4) - t.Run("for KubeadmConfig", utildefaulting.DefaultValidateTest(updateDefaultingKubeadmConfig)) + updateDefaultingKubeadmConfig.Spec.Verbosity = ptr.To[int32](4) + webhook := &KubeadmConfig{} + t.Run("for KubeadmConfig", util.CustomDefaultValidateTest(ctx, updateDefaultingKubeadmConfig, webhook)) - kubeadmConfig.Default() + g.Expect(webhook.Default(ctx, kubeadmConfig)).To(Succeed()) - g.Expect(kubeadmConfig.Spec.Format).To(Equal(CloudConfig)) + g.Expect(kubeadmConfig.Spec.Format).To(Equal(bootstrapv1.CloudConfig)) - ignitionKubeadmConfig := &KubeadmConfig{ + ignitionKubeadmConfig := &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ Namespace: "foo", }, - Spec: KubeadmConfigSpec{ - Format: Ignition, + Spec: bootstrapv1.KubeadmConfigSpec{ + Format: bootstrapv1.Ignition, }, } - ignitionKubeadmConfig.Default() - g.Expect(ignitionKubeadmConfig.Spec.Format).To(Equal(Ignition)) + g.Expect(webhook.Default(ctx, ignitionKubeadmConfig)).To(Succeed()) + g.Expect(ignitionKubeadmConfig.Spec.Format).To(Equal(bootstrapv1.Ignition)) } func TestKubeadmConfigValidate(t *testing.T) { cases := map[string]struct { - in *KubeadmConfig + in *bootstrapv1.KubeadmConfig enableIgnitionFeature bool expectErr bool }{ "valid content": { - in: &KubeadmConfig{ + in: &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: metav1.NamespaceDefault, }, - Spec: KubeadmConfigSpec{ - Files: []File{ + Spec: bootstrapv1.KubeadmConfigSpec{ + Files: []bootstrapv1.File{ { 
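+ // Only Content is set here; Content and ContentFrom are mutually exclusive, as the invalid cases below verify.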
Content: "foo", }, @@ -81,16 +86,16 @@ func TestKubeadmConfigValidate(t *testing.T) { }, }, "valid contentFrom": { - in: &KubeadmConfig{ + in: &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: metav1.NamespaceDefault, }, - Spec: KubeadmConfigSpec{ - Files: []File{ + Spec: bootstrapv1.KubeadmConfigSpec{ + Files: []bootstrapv1.File{ { - ContentFrom: &FileSource{ - Secret: SecretFileSource{ + ContentFrom: &bootstrapv1.FileSource{ + Secret: bootstrapv1.SecretFileSource{ Name: "foo", Key: "bar", }, @@ -101,15 +106,15 @@ func TestKubeadmConfigValidate(t *testing.T) { }, }, "invalid content and contentFrom": { - in: &KubeadmConfig{ + in: &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: metav1.NamespaceDefault, }, - Spec: KubeadmConfigSpec{ - Files: []File{ + Spec: bootstrapv1.KubeadmConfigSpec{ + Files: []bootstrapv1.File{ { - ContentFrom: &FileSource{}, + ContentFrom: &bootstrapv1.FileSource{}, Content: "foo", }, }, @@ -118,16 +123,16 @@ func TestKubeadmConfigValidate(t *testing.T) { expectErr: true, }, "invalid contentFrom without name": { - in: &KubeadmConfig{ + in: &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: metav1.NamespaceDefault, }, - Spec: KubeadmConfigSpec{ - Files: []File{ + Spec: bootstrapv1.KubeadmConfigSpec{ + Files: []bootstrapv1.File{ { - ContentFrom: &FileSource{ - Secret: SecretFileSource{ + ContentFrom: &bootstrapv1.FileSource{ + Secret: bootstrapv1.SecretFileSource{ Key: "bar", }, }, @@ -139,16 +144,16 @@ func TestKubeadmConfigValidate(t *testing.T) { expectErr: true, }, "invalid contentFrom without key": { - in: &KubeadmConfig{ + in: &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: metav1.NamespaceDefault, }, - Spec: KubeadmConfigSpec{ - Files: []File{ + Spec: bootstrapv1.KubeadmConfigSpec{ + Files: []bootstrapv1.File{ { - ContentFrom: &FileSource{ - Secret: SecretFileSource{ + ContentFrom: &bootstrapv1.FileSource{ + Secret: bootstrapv1.SecretFileSource{ Name: "foo", }, }, @@ -160,13 +165,13 @@ func TestKubeadmConfigValidate(t *testing.T) { expectErr: true, }, "invalid with duplicate file path": { - in: &KubeadmConfig{ + in: &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: metav1.NamespaceDefault, }, - Spec: KubeadmConfigSpec{ - Files: []File{ + Spec: bootstrapv1.KubeadmConfigSpec{ + Files: []bootstrapv1.File{ { Content: "foo", }, @@ -179,31 +184,31 @@ func TestKubeadmConfigValidate(t *testing.T) { expectErr: true, }, "valid passwd": { - in: &KubeadmConfig{ + in: &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: metav1.NamespaceDefault, }, - Spec: KubeadmConfigSpec{ - Users: []User{ + Spec: bootstrapv1.KubeadmConfigSpec{ + Users: []bootstrapv1.User{ { - Passwd: pointer.String("foo"), + Passwd: ptr.To("foo"), }, }, }, }, }, "valid passwdFrom": { - in: &KubeadmConfig{ + in: &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: metav1.NamespaceDefault, }, - Spec: KubeadmConfigSpec{ - Users: []User{ + Spec: bootstrapv1.KubeadmConfigSpec{ + Users: []bootstrapv1.User{ { - PasswdFrom: &PasswdSource{ - Secret: SecretPasswdSource{ + PasswdFrom: &bootstrapv1.PasswdSource{ + Secret: bootstrapv1.SecretPasswdSource{ Name: "foo", Key: "bar", }, @@ -214,16 +219,16 @@ func TestKubeadmConfigValidate(t *testing.T) { }, }, "invalid passwd and passwdFrom": { - in: &KubeadmConfig{ + in: &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", 
Namespace: metav1.NamespaceDefault, }, - Spec: KubeadmConfigSpec{ - Users: []User{ + Spec: bootstrapv1.KubeadmConfigSpec{ + Users: []bootstrapv1.User{ { - PasswdFrom: &PasswdSource{}, - Passwd: pointer.String("foo"), + PasswdFrom: &bootstrapv1.PasswdSource{}, + Passwd: ptr.To("foo"), }, }, }, @@ -231,20 +236,20 @@ func TestKubeadmConfigValidate(t *testing.T) { expectErr: true, }, "invalid passwdFrom without name": { - in: &KubeadmConfig{ + in: &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: metav1.NamespaceDefault, }, - Spec: KubeadmConfigSpec{ - Users: []User{ + Spec: bootstrapv1.KubeadmConfigSpec{ + Users: []bootstrapv1.User{ { - PasswdFrom: &PasswdSource{ - Secret: SecretPasswdSource{ + PasswdFrom: &bootstrapv1.PasswdSource{ + Secret: bootstrapv1.SecretPasswdSource{ Key: "bar", }, }, - Passwd: pointer.String("foo"), + Passwd: ptr.To("foo"), }, }, }, @@ -252,20 +257,20 @@ func TestKubeadmConfigValidate(t *testing.T) { expectErr: true, }, "invalid passwdFrom without key": { - in: &KubeadmConfig{ + in: &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: metav1.NamespaceDefault, }, - Spec: KubeadmConfigSpec{ - Users: []User{ + Spec: bootstrapv1.KubeadmConfigSpec{ + Users: []bootstrapv1.User{ { - PasswdFrom: &PasswdSource{ - Secret: SecretPasswdSource{ + PasswdFrom: &bootstrapv1.PasswdSource{ + Secret: bootstrapv1.SecretPasswdSource{ Name: "foo", }, }, - Passwd: pointer.String("foo"), + Passwd: ptr.To("foo"), }, }, }, @@ -274,41 +279,41 @@ func TestKubeadmConfigValidate(t *testing.T) { }, "Ignition field is set, format is not Ignition": { enableIgnitionFeature: true, - in: &KubeadmConfig{ + in: &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: "default", }, - Spec: KubeadmConfigSpec{ - Ignition: &IgnitionSpec{}, + Spec: bootstrapv1.KubeadmConfigSpec{ + Ignition: &bootstrapv1.IgnitionSpec{}, }, }, expectErr: true, }, "Ignition field is not set, format is Ignition": { enableIgnitionFeature: true, - in: &KubeadmConfig{ + in: &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: "default", }, - Spec: KubeadmConfigSpec{ - Format: Ignition, + Spec: bootstrapv1.KubeadmConfigSpec{ + Format: bootstrapv1.Ignition, }, }, }, "format is Ignition, user is inactive": { enableIgnitionFeature: true, - in: &KubeadmConfig{ + in: &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: "default", }, - Spec: KubeadmConfigSpec{ - Format: Ignition, - Users: []User{ + Spec: bootstrapv1.KubeadmConfigSpec{ + Format: bootstrapv1.Ignition, + Users: []bootstrapv1.User{ { - Inactive: pointer.Bool(true), + Inactive: ptr.To(true), }, }, }, @@ -317,17 +322,17 @@ func TestKubeadmConfigValidate(t *testing.T) { }, "format is Ignition, non-GPT partition configured": { enableIgnitionFeature: true, - in: &KubeadmConfig{ + in: &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: "default", }, - Spec: KubeadmConfigSpec{ - Format: Ignition, - DiskSetup: &DiskSetup{ - Partitions: []Partition{ + Spec: bootstrapv1.KubeadmConfigSpec{ + Format: bootstrapv1.Ignition, + DiskSetup: &bootstrapv1.DiskSetup{ + Partitions: []bootstrapv1.Partition{ { - TableType: pointer.String("MS-DOS"), + TableType: ptr.To("MS-DOS"), }, }, }, @@ -337,40 +342,40 @@ func TestKubeadmConfigValidate(t *testing.T) { }, "format is Ignition, experimental retry join is set": { enableIgnitionFeature: true, - in: &KubeadmConfig{ + in: &bootstrapv1.KubeadmConfig{ ObjectMeta: 
metav1.ObjectMeta{ Name: "baz", Namespace: "default", }, - Spec: KubeadmConfigSpec{ - Format: Ignition, + Spec: bootstrapv1.KubeadmConfigSpec{ + Format: bootstrapv1.Ignition, UseExperimentalRetryJoin: true, }, }, expectErr: true, }, "feature gate disabled, format is Ignition": { - in: &KubeadmConfig{ + in: &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: "default", }, - Spec: KubeadmConfigSpec{ - Format: Ignition, + Spec: bootstrapv1.KubeadmConfigSpec{ + Format: bootstrapv1.Ignition, }, }, expectErr: true, }, "feature gate disabled, Ignition field is set": { - in: &KubeadmConfig{ + in: &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: "default", }, - Spec: KubeadmConfigSpec{ - Format: Ignition, - Ignition: &IgnitionSpec{ - ContainerLinuxConfig: &ContainerLinuxConfig{}, + Spec: bootstrapv1.KubeadmConfigSpec{ + Format: bootstrapv1.Ignition, + Ignition: &bootstrapv1.IgnitionSpec{ + ContainerLinuxConfig: &bootstrapv1.ContainerLinuxConfig{}, }, }, }, @@ -378,17 +383,17 @@ func TestKubeadmConfigValidate(t *testing.T) { }, "replaceFS specified with Ignition": { enableIgnitionFeature: true, - in: &KubeadmConfig{ + in: &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: "default", }, - Spec: KubeadmConfigSpec{ - Format: Ignition, - DiskSetup: &DiskSetup{ - Filesystems: []Filesystem{ + Spec: bootstrapv1.KubeadmConfigSpec{ + Format: bootstrapv1.Ignition, + DiskSetup: &bootstrapv1.DiskSetup{ + Filesystems: []bootstrapv1.Filesystem{ { - ReplaceFS: pointer.String("ntfs"), + ReplaceFS: ptr.To("ntfs"), }, }, }, @@ -398,17 +403,17 @@ func TestKubeadmConfigValidate(t *testing.T) { }, "filesystem partition specified with Ignition": { enableIgnitionFeature: true, - in: &KubeadmConfig{ + in: &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: "default", }, - Spec: KubeadmConfigSpec{ - Format: Ignition, - DiskSetup: &DiskSetup{ - Filesystems: []Filesystem{ + Spec: bootstrapv1.KubeadmConfigSpec{ + Format: bootstrapv1.Ignition, + DiskSetup: &bootstrapv1.DiskSetup{ + Filesystems: []bootstrapv1.Filesystem{ { - Partition: pointer.String("1"), + Partition: ptr.To("1"), }, }, }, @@ -418,16 +423,16 @@ func TestKubeadmConfigValidate(t *testing.T) { }, "file encoding gzip specified with Ignition": { enableIgnitionFeature: true, - in: &KubeadmConfig{ + in: &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: "default", }, - Spec: KubeadmConfigSpec{ - Format: Ignition, - Files: []File{ + Spec: bootstrapv1.KubeadmConfigSpec{ + Format: bootstrapv1.Ignition, + Files: []bootstrapv1.File{ { - Encoding: Gzip, + Encoding: bootstrapv1.Gzip, }, }, }, @@ -436,16 +441,16 @@ func TestKubeadmConfigValidate(t *testing.T) { }, "file encoding gzip+base64 specified with Ignition": { enableIgnitionFeature: true, - in: &KubeadmConfig{ + in: &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: "default", }, - Spec: KubeadmConfigSpec{ - Format: Ignition, - Files: []File{ + Spec: bootstrapv1.KubeadmConfigSpec{ + Format: bootstrapv1.Ignition, + Files: []bootstrapv1.File{ { - Encoding: GzipBase64, + Encoding: bootstrapv1.GzipBase64, }, }, }, @@ -462,12 +467,23 @@ func TestKubeadmConfigValidate(t *testing.T) { defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.KubeadmBootstrapFormatIgnition, true)() } g := NewWithT(t) + + webhook := &KubeadmConfig{} + if tt.expectErr { - g.Expect(tt.in.ValidateCreate()).NotTo(Succeed()) - 
g.Expect(tt.in.ValidateUpdate(nil)).NotTo(Succeed()) + warnings, err := webhook.ValidateCreate(ctx, tt.in) + g.Expect(err).To(HaveOccurred()) + g.Expect(warnings).To(BeEmpty()) + warnings, err = webhook.ValidateUpdate(ctx, nil, tt.in) + g.Expect(err).To(HaveOccurred()) + g.Expect(warnings).To(BeEmpty()) } else { - g.Expect(tt.in.ValidateCreate()).To(Succeed()) - g.Expect(tt.in.ValidateUpdate(nil)).To(Succeed()) + warnings, err := webhook.ValidateCreate(ctx, tt.in) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(warnings).To(BeEmpty()) + warnings, err = webhook.ValidateUpdate(ctx, nil, tt.in) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(warnings).To(BeEmpty()) } }) } diff --git a/bootstrap/kubeadm/api/v1beta1/kubeadmconfigtemplate_webhook.go b/bootstrap/kubeadm/internal/webhooks/kubeadmconfigtemplate.go similarity index 51% rename from bootstrap/kubeadm/api/v1beta1/kubeadmconfigtemplate_webhook.go rename to bootstrap/kubeadm/internal/webhooks/kubeadmconfigtemplate.go index a1c581fa67b5..2957f06882c1 100644 --- a/bootstrap/kubeadm/api/v1beta1/kubeadmconfigtemplate_webhook.go +++ b/bootstrap/kubeadm/internal/webhooks/kubeadmconfigtemplate.go @@ -14,58 +14,82 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +package webhooks import ( + "context" + "fmt" + apierrors "k8s.io/apimachinery/pkg/api/errors" runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" ) -func (r *KubeadmConfigTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error { +func (webhook *KubeadmConfigTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr). - For(r). + For(&bootstrapv1.KubeadmConfigTemplate{}). + WithDefaulter(webhook). + WithValidator(webhook). Complete() } // +kubebuilder:webhook:verbs=create;update,path=/mutate-bootstrap-cluster-x-k8s-io-v1beta1-kubeadmconfigtemplate,mutating=true,failurePolicy=fail,groups=bootstrap.cluster.x-k8s.io,resources=kubeadmconfigtemplates,versions=v1beta1,name=default.kubeadmconfigtemplate.bootstrap.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 +// +kubebuilder:webhook:verbs=create;update,path=/validate-bootstrap-cluster-x-k8s-io-v1beta1-kubeadmconfigtemplate,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=bootstrap.cluster.x-k8s.io,resources=kubeadmconfigtemplates,versions=v1beta1,name=validation.kubeadmconfigtemplate.bootstrap.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 -var _ webhook.Defaulter = &KubeadmConfigTemplate{} +// KubeadmConfigTemplate implements a validation and defaulting webhook for KubeadmConfigTemplate. +type KubeadmConfigTemplate struct{} // Default implements webhook.Defaulter so a webhook will be registered for the type. 
-func (r *KubeadmConfigTemplate) Default() { - DefaultKubeadmConfigSpec(&r.Spec.Template.Spec) -} +func (webhook *KubeadmConfigTemplate) Default(_ context.Context, obj runtime.Object) error { + c, ok := obj.(*bootstrapv1.KubeadmConfigTemplate) + if !ok { + return apierrors.NewBadRequest(fmt.Sprintf("expected a KubeadmConfigTemplate but got a %T", obj)) + } -// +kubebuilder:webhook:verbs=create;update,path=/validate-bootstrap-cluster-x-k8s-io-v1beta1-kubeadmconfigtemplate,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=bootstrap.cluster.x-k8s.io,resources=kubeadmconfigtemplates,versions=v1beta1,name=validation.kubeadmconfigtemplate.bootstrap.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 + c.Spec.Template.Spec.Default() -var _ webhook.Validator = &KubeadmConfigTemplate{} + return nil +} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type. -func (r *KubeadmConfigTemplate) ValidateCreate() error { - return r.Spec.validate(r.Name) +func (webhook *KubeadmConfigTemplate) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) { + c, ok := obj.(*bootstrapv1.KubeadmConfigTemplate) + if !ok { + return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a KubeadmConfigTemplate but got a %T", obj)) + } + + return nil, webhook.validate(&c.Spec, c.Name) } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. -func (r *KubeadmConfigTemplate) ValidateUpdate(_ runtime.Object) error { - return r.Spec.validate(r.Name) +func (webhook *KubeadmConfigTemplate) ValidateUpdate(_ context.Context, _, newObj runtime.Object) (admission.Warnings, error) { + newC, ok := newObj.(*bootstrapv1.KubeadmConfigTemplate) + if !ok { + return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a KubeadmConfigTemplate but got a %T", newObj)) + } + + return nil, webhook.validate(&newC.Spec, newC.Name) } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. -func (r *KubeadmConfigTemplate) ValidateDelete() error { - return nil +func (webhook *KubeadmConfigTemplate) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) { + return nil, nil } -func (r *KubeadmConfigTemplateSpec) validate(name string) error { +func (webhook *KubeadmConfigTemplate) validate(r *bootstrapv1.KubeadmConfigTemplateSpec, name string) error { var allErrs field.ErrorList allErrs = append(allErrs, r.Template.Spec.Validate(field.NewPath("spec", "template", "spec"))...) + // Validate the metadata of the template. + allErrs = append(allErrs, r.Template.ObjectMeta.Validate(field.NewPath("spec", "template", "metadata"))...) 
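+ // Aggregating the spec and metadata errors lets a single Invalid response report every problem at once.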
if len(allErrs) == 0 { return nil } - return apierrors.NewInvalid(GroupVersion.WithKind("KubeadmConfigTemplate").GroupKind(), name, allErrs) + return apierrors.NewInvalid(bootstrapv1.GroupVersion.WithKind("KubeadmConfigTemplate").GroupKind(), name, allErrs) } diff --git a/bootstrap/kubeadm/api/v1beta1/kubeadmconfigtemplate_webhook_test.go b/bootstrap/kubeadm/internal/webhooks/kubeadmconfigtemplate_test.go similarity index 54% rename from bootstrap/kubeadm/api/v1beta1/kubeadmconfigtemplate_webhook_test.go rename to bootstrap/kubeadm/internal/webhooks/kubeadmconfigtemplate_test.go index c35a4b438f32..215e6a6cde84 100644 --- a/bootstrap/kubeadm/api/v1beta1/kubeadmconfigtemplate_webhook_test.go +++ b/bootstrap/kubeadm/internal/webhooks/kubeadmconfigtemplate_test.go @@ -14,17 +14,19 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1_test +package webhooks import ( + "strings" "testing" . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" - utildefaulting "sigs.k8s.io/cluster-api/util/defaulting" + "sigs.k8s.io/cluster-api/internal/webhooks/util" ) func TestKubeadmConfigTemplateDefault(t *testing.T) { @@ -36,17 +38,19 @@ func TestKubeadmConfigTemplateDefault(t *testing.T) { }, } updateDefaultingKubeadmConfigTemplate := kubeadmConfigTemplate.DeepCopy() - updateDefaultingKubeadmConfigTemplate.Spec.Template.Spec.Verbosity = pointer.Int32(4) - t.Run("for KubeadmConfigTemplate", utildefaulting.DefaultValidateTest(updateDefaultingKubeadmConfigTemplate)) + updateDefaultingKubeadmConfigTemplate.Spec.Template.Spec.Verbosity = ptr.To[int32](4) + webhook := &KubeadmConfigTemplate{} + t.Run("for KubeadmConfigTemplate", util.CustomDefaultValidateTest(ctx, updateDefaultingKubeadmConfigTemplate, webhook)) - kubeadmConfigTemplate.Default() + g.Expect(webhook.Default(ctx, kubeadmConfigTemplate)).To(Succeed()) g.Expect(kubeadmConfigTemplate.Spec.Template.Spec.Format).To(Equal(bootstrapv1.CloudConfig)) } func TestKubeadmConfigTemplateValidation(t *testing.T) { cases := map[string]struct { - in *bootstrapv1.KubeadmConfigTemplate + in *bootstrapv1.KubeadmConfigTemplate + expectErr bool }{ "valid configuration": { in: &bootstrapv1.KubeadmConfigTemplate{ @@ -61,15 +65,44 @@ func TestKubeadmConfigTemplateValidation(t *testing.T) { }, }, }, + "should return error for invalid labels and annotations": { + in: &bootstrapv1.KubeadmConfigTemplate{Spec: bootstrapv1.KubeadmConfigTemplateSpec{ + Template: bootstrapv1.KubeadmConfigTemplateResource{ObjectMeta: clusterv1.ObjectMeta{ + Labels: map[string]string{ + "foo": "$invalid-key", + "bar": strings.Repeat("a", 64) + "too-long-value", + "/invalid-key": "foo", + }, + Annotations: map[string]string{ + "/invalid-key": "foo", + }, + }}, + }}, + expectErr: true, + }, } for name, tt := range cases { tt := tt + webhook := &KubeadmConfigTemplate{} + t.Run(name, func(t *testing.T) { g := NewWithT(t) - g.Expect(tt.in.ValidateCreate()).To(Succeed()) - g.Expect(tt.in.ValidateUpdate(nil)).To(Succeed()) + warnings, err := webhook.ValidateCreate(ctx, tt.in) + if tt.expectErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(warnings).To(BeEmpty()) + warnings, err = webhook.ValidateUpdate(ctx, nil, tt.in) + if tt.expectErr { + g.Expect(err).To(HaveOccurred()) + } else { + 
g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(warnings).To(BeEmpty()) }) } } diff --git a/bootstrap/kubeadm/main.go b/bootstrap/kubeadm/main.go index f8e73fa12ce1..847f9733ce23 100644 --- a/bootstrap/kubeadm/main.go +++ b/bootstrap/kubeadm/main.go @@ -21,16 +21,15 @@ import ( "context" "flag" "fmt" - "math/rand" - "net/http" - _ "net/http/pprof" "os" + goruntime "runtime" "time" - // +kubebuilder:scaffold:imports "github.com/spf13/pflag" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/selection" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/leaderelection/resourcelock" cliflag "k8s.io/component-base/cli/flag" @@ -39,40 +38,30 @@ import ( _ "k8s.io/component-base/logs/json/register" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/webhook" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - bootstrapv1alpha3 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha3" - bootstrapv1alpha4 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" kubeadmbootstrapcontrollers "sigs.k8s.io/cluster-api/bootstrap/kubeadm/controllers" + "sigs.k8s.io/cluster-api/bootstrap/kubeadm/internal/webhooks" "sigs.k8s.io/cluster-api/controllers/remote" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/feature" + bootstrapv1alpha3 "sigs.k8s.io/cluster-api/internal/apis/bootstrap/kubeadm/v1alpha3" + bootstrapv1alpha4 "sigs.k8s.io/cluster-api/internal/apis/bootstrap/kubeadm/v1alpha4" "sigs.k8s.io/cluster-api/util/flags" "sigs.k8s.io/cluster-api/version" ) var ( - scheme = runtime.NewScheme() - setupLog = ctrl.Log.WithName("setup") -) - -func init() { - klog.InitFlags(nil) + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") + controllerName = "cluster-api-kubeadm-bootstrap-manager" - _ = clientgoscheme.AddToScheme(scheme) - _ = clusterv1.AddToScheme(scheme) - _ = expv1.AddToScheme(scheme) - _ = bootstrapv1alpha3.AddToScheme(scheme) - _ = bootstrapv1alpha4.AddToScheme(scheme) - _ = bootstrapv1.AddToScheme(scheme) - // +kubebuilder:scaffold:scheme -} - -var ( - metricsBindAddr string + // flags. enableLeaderElection bool leaderElectionLeaseDuration time.Duration leaderElectionRenewDeadline time.Duration @@ -80,23 +69,38 @@ var ( watchFilterValue string watchNamespace string profilerAddress string - kubeadmConfigConcurrency int + enableContentionProfiling bool syncPeriod time.Duration + restConfigQPS float32 + restConfigBurst int webhookPort int webhookCertDir string + webhookCertName string + webhookKeyName string healthAddr string - tokenTTL time.Duration tlsOptions = flags.TLSOptions{} + diagnosticsOptions = flags.DiagnosticsOptions{} logOptions = logs.NewOptions() + // CABPK specific flags. + clusterConcurrency int + clusterCacheTrackerConcurrency int + kubeadmConfigConcurrency int + tokenTTL time.Duration ) -// InitFlags initializes this manager's flags. +func init() { + _ = clientgoscheme.AddToScheme(scheme) + _ = clusterv1.AddToScheme(scheme) + _ = expv1.AddToScheme(scheme) + _ = bootstrapv1alpha3.AddToScheme(scheme) + _ = bootstrapv1alpha4.AddToScheme(scheme) + _ = bootstrapv1.AddToScheme(scheme) +} + +// InitFlags initializes the flags. 
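+// Parsed values are stored in the package-level flag variables declared above.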
func InitFlags(fs *pflag.FlagSet) { logsv1.AddFlags(logOptions, fs) - fs.StringVar(&metricsBindAddr, "metrics-bind-addr", "localhost:8080", - "The address the metric endpoint binds to.") - fs.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.") @@ -112,41 +116,71 @@ func InitFlags(fs *pflag.FlagSet) { fs.StringVar(&watchNamespace, "namespace", "", "Namespace that the controller watches to reconcile cluster-api objects. If unspecified, the controller watches for cluster-api objects across all namespaces.") + fs.StringVar(&watchFilterValue, "watch-filter", "", + fmt.Sprintf("Label value that the controller watches to reconcile cluster-api objects. Label key is always %s. If unspecified, the controller watches for all cluster-api objects.", clusterv1.WatchLabel)) + fs.StringVar(&profilerAddress, "profiler-address", "", "Bind address to expose the pprof profiler (e.g. localhost:6060)") + fs.BoolVar(&enableContentionProfiling, "contention-profiling", false, + "Enable block profiling") + + fs.IntVar(&clusterConcurrency, "cluster-concurrency", 10, + "Number of clusters to process simultaneously") + _ = fs.MarkDeprecated("cluster-concurrency", "This flag has no function anymore and is going to be removed in a next release. Use \"--clustercachetracker-concurrency\" instead.") + + fs.IntVar(&clusterCacheTrackerConcurrency, "clustercachetracker-concurrency", 10, + "Number of clusters to process simultaneously") + fs.IntVar(&kubeadmConfigConcurrency, "kubeadmconfig-concurrency", 10, "Number of kubeadm configs to process simultaneously") fs.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, "The minimum interval at which watched resources are reconciled (e.g. 15m)") + fs.Float32Var(&restConfigQPS, "kube-api-qps", 20, + "Maximum queries per second from the controller client to the Kubernetes API server. Defaults to 20") + + fs.IntVar(&restConfigBurst, "kube-api-burst", 30, + "Maximum number of queries that should be allowed in one burst from the controller client to the Kubernetes API server. Default 30") + fs.DurationVar(&tokenTTL, "bootstrap-token-ttl", kubeadmbootstrapcontrollers.DefaultTokenTTL, "The amount of time the bootstrap token will be valid") - fs.StringVar(&watchFilterValue, "watch-filter", "", - fmt.Sprintf("Label value that the controller watches to reconcile cluster-api objects. Label key is always %s. If unspecified, the controller watches for all cluster-api objects.", clusterv1.WatchLabel)) - fs.IntVar(&webhookPort, "webhook-port", 9443, "Webhook Server port") fs.StringVar(&webhookCertDir, "webhook-cert-dir", "/tmp/k8s-webhook-server/serving-certs/", - "Webhook cert dir, only used when webhook-port is specified.") + "Webhook cert dir.") + + fs.StringVar(&webhookCertName, "webhook-cert-name", "tls.crt", + "Webhook cert name.") + + fs.StringVar(&webhookKeyName, "webhook-key-name", "tls.key", + "Webhook key name.") fs.StringVar(&healthAddr, "health-addr", ":9440", "The address the health endpoint binds to.") + flags.AddDiagnosticsOptions(fs, &diagnosticsOptions) flags.AddTLSOptions(fs, &tlsOptions) feature.MutableGates.AddFlag(fs) } -func main() { - rand.Seed(time.Now().UnixNano()) +// Add RBAC for the authorized diagnostics endpoint. 
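+// The markers below generate rules for creating TokenReviews and SubjectAccessReviews, which back authentication and authorization of metrics scrapes.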
+// +kubebuilder:rbac:groups=authentication.k8s.io,resources=tokenreviews,verbs=create +// +kubebuilder:rbac:groups=authorization.k8s.io,resources=subjectaccessreviews,verbs=create +func main() { InitFlags(pflag.CommandLine) pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc) pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + // Set log level 2 as default. + if err := pflag.CommandLine.Set("v", "2"); err != nil { + setupLog.Error(err, "failed to set default log level") + os.Exit(1) + } pflag.Parse() if err := logsv1.ValidateAndApply(logOptions, nil); err != nil { @@ -156,18 +190,11 @@ func main() { // klog.Background will automatically use the right logger. ctrl.SetLogger(klog.Background()) - if profilerAddress != "" { - setupLog.Info(fmt.Sprintf("Profiler listening for requests at %s", profilerAddress)) - go func() { - srv := http.Server{Addr: profilerAddress, ReadHeaderTimeout: 2 * time.Second} - if err := srv.ListenAndServe(); err != nil { - setupLog.Error(err, "problem running profiler server") - } - }() - } restConfig := ctrl.GetConfigOrDie() - restConfig.UserAgent = remote.DefaultClusterAPIUserAgent("cluster-api-kubeadm-bootstrap-manager") + restConfig.QPS = restConfigQPS + restConfig.Burst = restConfigBurst + restConfig.UserAgent = remote.DefaultClusterAPIUserAgent(controllerName) tlsOptionOverrides, err := flags.GetTLSOptionOverrideFuncs(tlsOptions) if err != nil { @@ -175,26 +202,65 @@ func main() { os.Exit(1) } - mgr, err := ctrl.NewManager(restConfig, ctrl.Options{ + diagnosticsOpts := flags.GetDiagnosticsOptions(diagnosticsOptions) + + var watchNamespaces map[string]cache.Config + if watchNamespace != "" { + watchNamespaces = map[string]cache.Config{ + watchNamespace: {}, + } + } + + if enableContentionProfiling { + goruntime.SetBlockProfileRate(1) + } + + req, _ := labels.NewRequirement(clusterv1.ClusterNameLabel, selection.Exists, nil) + clusterSecretCacheSelector := labels.NewSelector().Add(*req) + + ctrlOptions := ctrl.Options{ Scheme: scheme, - MetricsBindAddress: metricsBindAddr, LeaderElection: enableLeaderElection, LeaderElectionID: "kubeadm-bootstrap-manager-leader-election-capi", LeaseDuration: &leaderElectionLeaseDuration, RenewDeadline: &leaderElectionRenewDeadline, RetryPeriod: &leaderElectionRetryPeriod, LeaderElectionResourceLock: resourcelock.LeasesResourceLock, - Namespace: watchNamespace, - SyncPeriod: &syncPeriod, - ClientDisableCacheFor: []client.Object{ - &corev1.ConfigMap{}, - &corev1.Secret{}, + HealthProbeBindAddress: healthAddr, + PprofBindAddress: profilerAddress, + Metrics: diagnosticsOpts, + Cache: cache.Options{ + DefaultNamespaces: watchNamespaces, + SyncPeriod: &syncPeriod, + ByObject: map[client.Object]cache.ByObject{ + // Note: Only Secrets with the cluster name label are cached. + // The default client of the manager won't use the cache for secrets at all (see Client.Cache.DisableFor). + // The cached secrets will only be used by the secretCachingClient we create below. 
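+ // The label selector keeps the cache small by excluding Secrets that do not belong to any Cluster.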
+ &corev1.Secret{}: { + Label: clusterSecretCacheSelector, + }, + }, }, - Port: webhookPort, - HealthProbeBindAddress: healthAddr, - CertDir: webhookCertDir, - TLSOpts: tlsOptionOverrides, - }) + Client: client.Options{ + Cache: &client.CacheOptions{ + DisableFor: []client.Object{ + &corev1.ConfigMap{}, + &corev1.Secret{}, + }, + }, + }, + WebhookServer: webhook.NewServer( + webhook.Options{ + Port: webhookPort, + CertDir: webhookCertDir, + CertName: webhookCertName, + KeyName: webhookKeyName, + TLSOpts: tlsOptionOverrides, + }, + ), + } + + mgr, err := ctrl.NewManager(restConfig, ctrlOptions) if err != nil { setupLog.Error(err, "unable to start manager") os.Exit(1) @@ -207,8 +273,7 @@ func main() { setupWebhooks(mgr) setupReconcilers(ctx, mgr) - // +kubebuilder:scaffold:builder - setupLog.Info("starting manager", "version", version.Get().String()) + setupLog.Info("Starting manager", "version", version.Get().String()) if err := mgr.Start(ctx); err != nil { setupLog.Error(err, "problem running manager") os.Exit(1) @@ -228,10 +293,46 @@ func setupChecks(mgr ctrl.Manager) { } func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { - if err := (&kubeadmbootstrapcontrollers.KubeadmConfigReconciler{ + secretCachingClient, err := client.New(mgr.GetConfig(), client.Options{ + HTTPClient: mgr.GetHTTPClient(), + Cache: &client.CacheOptions{ + Reader: mgr.GetCache(), + }, + }) + if err != nil { + setupLog.Error(err, "unable to create secret caching client") + os.Exit(1) + } + + // Set up a ClusterCacheTracker and ClusterCacheReconciler to provide to controllers + // requiring a connection to a remote cluster + tracker, err := remote.NewClusterCacheTracker( + mgr, + remote.ClusterCacheTrackerOptions{ + SecretCachingClient: secretCachingClient, + ControllerName: controllerName, + Log: &ctrl.Log, + }, + ) + if err != nil { + setupLog.Error(err, "unable to create cluster cache tracker") + os.Exit(1) + } + if err := (&remote.ClusterCacheReconciler{ Client: mgr.GetClient(), + Tracker: tracker, WatchFilterValue: watchFilterValue, - TokenTTL: tokenTTL, + }).SetupWithManager(ctx, mgr, concurrency(clusterCacheTrackerConcurrency)); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "ClusterCacheReconciler") + os.Exit(1) + } + + if err := (&kubeadmbootstrapcontrollers.KubeadmConfigReconciler{ + Client: mgr.GetClient(), + SecretCachingClient: secretCachingClient, + Tracker: tracker, + WatchFilterValue: watchFilterValue, + TokenTTL: tokenTTL, }).SetupWithManager(ctx, mgr, concurrency(kubeadmConfigConcurrency)); err != nil { setupLog.Error(err, "unable to create controller", "controller", "KubeadmConfig") os.Exit(1) @@ -239,11 +340,11 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { } func setupWebhooks(mgr ctrl.Manager) { - if err := (&bootstrapv1.KubeadmConfig{}).SetupWebhookWithManager(mgr); err != nil { + if err := (&webhooks.KubeadmConfig{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "KubeadmConfig") os.Exit(1) } - if err := (&bootstrapv1.KubeadmConfigTemplate{}).SetupWebhookWithManager(mgr); err != nil { + if err := (&webhooks.KubeadmConfigTemplate{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "KubeadmConfigTemplate") os.Exit(1) } diff --git a/bootstrap/kubeadm/types/upstreamv1beta1/bootstraptokenstring.go b/bootstrap/kubeadm/types/upstreamv1beta1/bootstraptokenstring.go index f994efcdb6c6..efd815445f10 100644 --- 
a/bootstrap/kubeadm/types/upstreamv1beta1/bootstraptokenstring.go +++ b/bootstrap/kubeadm/types/upstreamv1beta1/bootstraptokenstring.go @@ -62,7 +62,7 @@ func (bts *BootstrapTokenString) UnmarshalJSON(b []byte) error { // String returns the string representation of the BootstrapTokenString. func (bts BootstrapTokenString) String() string { - if len(bts.ID) > 0 && len(bts.Secret) > 0 { + if bts.ID != "" && bts.Secret != "" { return bootstraputil.TokenFromIDAndSecret(bts.ID, bts.Secret) } return "" diff --git a/bootstrap/kubeadm/types/upstreamv1beta1/bootstraptokenstring_test.go b/bootstrap/kubeadm/types/upstreamv1beta1/bootstraptokenstring_test.go index 3a723e74f999..821d2a6295b6 100644 --- a/bootstrap/kubeadm/types/upstreamv1beta1/bootstraptokenstring_test.go +++ b/bootstrap/kubeadm/types/upstreamv1beta1/bootstraptokenstring_test.go @@ -18,9 +18,9 @@ package upstreamv1beta1 import ( "encoding/json" - "reflect" "testing" + "github.com/google/go-cmp/cmp" . "github.com/onsi/gomega" "github.com/pkg/errors" ) @@ -39,7 +39,7 @@ func TestMarshalJSON(t *testing.T) { g := NewWithT(t) b, err := json.Marshal(rt.bts) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(b).To(BeEquivalentTo(rt.expected)) }) } @@ -69,7 +69,7 @@ func TestUnmarshalJSON(t *testing.T) { if rt.expectedError { g.Expect(err).To(HaveOccurred()) } else { - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) } g.Expect(newbts).To(Equal(rt.bts)) }) @@ -98,7 +98,7 @@ func roundtrip(input string, bts *BootstrapTokenString) error { var err error newbts := &BootstrapTokenString{} // If string input was specified, roundtrip like this: string -> (unmarshal) -> object -> (marshal) -> string - if len(input) > 0 { + if input != "" { if err := json.Unmarshal([]byte(input), newbts); err != nil { return errors.Wrap(err, "expected no unmarshal error, got error") } @@ -119,11 +119,12 @@ func roundtrip(input string, bts *BootstrapTokenString) error { if err := json.Unmarshal(b, newbts); err != nil { return errors.Wrap(err, "expected no unmarshal error, got error") } - if !reflect.DeepEqual(bts, newbts) { + if diff := cmp.Diff(bts, newbts); diff != "" { return errors.Errorf( - "expected object: %v\n\t actual: %v", + "expected object: %v\n\t actual: %v\n\t got diff: %v", bts, newbts, + diff, ) } } @@ -179,7 +180,7 @@ func TestNewBootstrapTokenString(t *testing.T) { if rt.expectedError { g.Expect(err).To(HaveOccurred()) } else { - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) } g.Expect(actual).To(Equal(rt.bts)) }) @@ -214,7 +215,7 @@ func TestNewBootstrapTokenStringFromIDAndSecret(t *testing.T) { if rt.expectedError { g.Expect(err).To(HaveOccurred()) } else { - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) } g.Expect(actual).To(Equal(rt.bts)) }) diff --git a/bootstrap/kubeadm/types/upstreamv1beta1/zz_generated.deepcopy.go b/bootstrap/kubeadm/types/upstreamv1beta1/zz_generated.deepcopy.go index 2eebc2402470..aeaaa3afff3d 100644 --- a/bootstrap/kubeadm/types/upstreamv1beta1/zz_generated.deepcopy.go +++ b/bootstrap/kubeadm/types/upstreamv1beta1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* Copyright The Kubernetes Authors. 
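The test updates in these bootstraptokenstring files replace reflect.DeepEqual with cmp.Diff so that a failed comparison reports exactly what differs instead of printing two opaque values. A minimal standalone sketch of the pattern (the struct and token values below are illustrative stand-ins, not the real upstream types):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// bootstrapTokenString mimics the shape of the type under test.
type bootstrapTokenString struct {
	ID     string
	Secret string
}

func main() {
	want := &bootstrapTokenString{ID: "abcdef", Secret: "0123456789abcdef"}
	got := &bootstrapTokenString{ID: "abcdef", Secret: "0123456789abcdeX"}

	// cmp.Diff returns "" when the values are equal and a human-readable
	// diff otherwise, which is what the updated tests append to their
	// failure messages.
	if diff := cmp.Diff(want, got); diff != "" {
		fmt.Printf("mismatch (-want +got):\n%s", diff)
	}
}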
diff --git a/bootstrap/kubeadm/types/upstreamv1beta2/bootstraptokenstring.go b/bootstrap/kubeadm/types/upstreamv1beta2/bootstraptokenstring.go index 0dca2e0b3b0a..2ad57c137bd0 100644 --- a/bootstrap/kubeadm/types/upstreamv1beta2/bootstraptokenstring.go +++ b/bootstrap/kubeadm/types/upstreamv1beta2/bootstraptokenstring.go @@ -62,7 +62,7 @@ func (bts *BootstrapTokenString) UnmarshalJSON(b []byte) error { // String returns the string representation of the BootstrapTokenString. func (bts BootstrapTokenString) String() string { - if len(bts.ID) > 0 && len(bts.Secret) > 0 { + if bts.ID != "" && bts.Secret != "" { return bootstraputil.TokenFromIDAndSecret(bts.ID, bts.Secret) } return "" diff --git a/bootstrap/kubeadm/types/upstreamv1beta2/bootstraptokenstring_test.go b/bootstrap/kubeadm/types/upstreamv1beta2/bootstraptokenstring_test.go index 56f1e1d38d10..c6f33b0430c1 100644 --- a/bootstrap/kubeadm/types/upstreamv1beta2/bootstraptokenstring_test.go +++ b/bootstrap/kubeadm/types/upstreamv1beta2/bootstraptokenstring_test.go @@ -18,9 +18,9 @@ package upstreamv1beta2 import ( "encoding/json" - "reflect" "testing" + "github.com/google/go-cmp/cmp" . "github.com/onsi/gomega" "github.com/pkg/errors" ) @@ -39,7 +39,7 @@ func TestMarshalJSON(t *testing.T) { g := NewWithT(t) b, err := json.Marshal(rt.bts) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(string(b)).To(Equal(rt.expected)) }) } @@ -69,9 +69,9 @@ func TestUnmarshalJSON(t *testing.T) { if rt.expectedError { g.Expect(err).To(HaveOccurred()) } else { - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) } - g.Expect(newbts).To(Equal(rt.bts)) + g.Expect(newbts).To(BeComparableTo(rt.bts)) }) } } @@ -98,7 +98,7 @@ func roundtrip(input string, bts *BootstrapTokenString) error { var err error newbts := &BootstrapTokenString{} // If string input was specified, roundtrip like this: string -> (unmarshal) -> object -> (marshal) -> string - if len(input) > 0 { + if input != "" { if err := json.Unmarshal([]byte(input), newbts); err != nil { return errors.Wrap(err, "expected no unmarshal error, got error") } @@ -119,11 +119,12 @@ func roundtrip(input string, bts *BootstrapTokenString) error { if err := json.Unmarshal(b, newbts); err != nil { return errors.Wrap(err, "expected no unmarshal error, got error") } - if !reflect.DeepEqual(bts, newbts) { + if diff := cmp.Diff(bts, newbts); diff != "" { return errors.Errorf( - "expected object: %v\n\t actual: %v", + "expected object: %v\n\t actual: %v\n\t got diff: %v", bts, newbts, + diff, ) } } @@ -179,9 +180,9 @@ func TestNewBootstrapTokenString(t *testing.T) { if rt.expectedError { g.Expect(err).To(HaveOccurred()) } else { - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) } - g.Expect(actual).To(Equal(rt.bts)) + g.Expect(actual).To(BeComparableTo(rt.bts)) }) } } @@ -214,9 +215,9 @@ func TestNewBootstrapTokenStringFromIDAndSecret(t *testing.T) { if rt.expectedError { g.Expect(err).To(HaveOccurred()) } else { - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) } - g.Expect(actual).To(Equal(rt.bts)) + g.Expect(actual).To(BeComparableTo(rt.bts)) }) } } diff --git a/bootstrap/kubeadm/types/upstreamv1beta2/zz_generated.conversion.go b/bootstrap/kubeadm/types/upstreamv1beta2/zz_generated.conversion.go index 9a40da3b232f..0769ceb3a65f 100644 --- a/bootstrap/kubeadm/types/upstreamv1beta2/zz_generated.conversion.go +++ b/bootstrap/kubeadm/types/upstreamv1beta2/zz_generated.conversion.go @@ -1,5 +1,5 @@ -//go:build 
!ignore_autogenerated_kubeadm_types -// +build !ignore_autogenerated_kubeadm_types +//go:build !ignore_autogenerated +// +build !ignore_autogenerated /* Copyright The Kubernetes Authors. diff --git a/bootstrap/kubeadm/types/upstreamv1beta2/zz_generated.deepcopy.go b/bootstrap/kubeadm/types/upstreamv1beta2/zz_generated.deepcopy.go index 52218880c2d3..4ba216563738 100644 --- a/bootstrap/kubeadm/types/upstreamv1beta2/zz_generated.deepcopy.go +++ b/bootstrap/kubeadm/types/upstreamv1beta2/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* Copyright The Kubernetes Authors. @@ -24,7 +23,7 @@ package upstreamv1beta2 import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" + runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. diff --git a/bootstrap/kubeadm/types/upstreamv1beta3/bootstraptokenstring.go b/bootstrap/kubeadm/types/upstreamv1beta3/bootstraptokenstring.go index 2bb4fda07254..5b0b0aebc3ff 100644 --- a/bootstrap/kubeadm/types/upstreamv1beta3/bootstraptokenstring.go +++ b/bootstrap/kubeadm/types/upstreamv1beta3/bootstraptokenstring.go @@ -60,7 +60,7 @@ func (bts *BootstrapTokenString) UnmarshalJSON(b []byte) error { // String returns the string representation of the BootstrapTokenString. func (bts BootstrapTokenString) String() string { - if len(bts.ID) > 0 && len(bts.Secret) > 0 { + if bts.ID != "" && bts.Secret != "" { return bootstraputil.TokenFromIDAndSecret(bts.ID, bts.Secret) } return "" diff --git a/bootstrap/kubeadm/types/upstreamv1beta3/bootstraptokenstring_test.go b/bootstrap/kubeadm/types/upstreamv1beta3/bootstraptokenstring_test.go index b19e652b0671..202244fa4e7d 100644 --- a/bootstrap/kubeadm/types/upstreamv1beta3/bootstraptokenstring_test.go +++ b/bootstrap/kubeadm/types/upstreamv1beta3/bootstraptokenstring_test.go @@ -18,9 +18,9 @@ package upstreamv1beta3 import ( "encoding/json" - "reflect" "testing" + "github.com/google/go-cmp/cmp" "github.com/pkg/errors" ) @@ -71,11 +71,12 @@ func TestUnmarshalJSON(t *testing.T) { err := json.Unmarshal([]byte(rt.input), newbts) if (err != nil) != rt.expectedError { t.Errorf("failed BootstrapTokenString.UnmarshalJSON:\n\texpected error: %t\n\t actual error: %v", rt.expectedError, err) - } else if !reflect.DeepEqual(rt.bts, newbts) { + } else if diff := cmp.Diff(rt.bts, newbts); diff != "" { t.Errorf( - "failed BootstrapTokenString.UnmarshalJSON:\n\texpected: %v\n\t actual: %v", + "failed BootstrapTokenString.UnmarshalJSON:\n\texpected: %v\n\t actual: %v\n\t diff: %v", rt.bts, newbts, + diff, ) } }) @@ -104,7 +105,7 @@ func roundtrip(input string, bts *BootstrapTokenString) error { var err error newbts := &BootstrapTokenString{} // If string input was specified, roundtrip like this: string -> (unmarshal) -> object -> (marshal) -> string - if len(input) > 0 { + if input != "" { if err := json.Unmarshal([]byte(input), newbts); err != nil { return errors.Wrap(err, "expected no unmarshal error, got error") } @@ -125,11 +126,12 @@ func roundtrip(input string, bts *BootstrapTokenString) error { if err := json.Unmarshal(b, newbts); err != nil { return errors.Wrap(err, "expected no unmarshal error, got error") } - if !reflect.DeepEqual(bts, newbts) { + if diff := cmp.Diff(bts, newbts); diff != "" { return errors.Errorf( - "expected object: %v\n\t actual: %v", + "expected object: %v\n\t actual: %v\n\t got diff: %v", bts, newbts, + 
diff, ) } } @@ -192,12 +194,13 @@ func TestNewBootstrapTokenString(t *testing.T) { rt.expectedError, err, ) - } else if !reflect.DeepEqual(actual, rt.bts) { + } else if diff := cmp.Diff(actual, rt.bts); diff != "" { t.Errorf( - "failed NewBootstrapTokenString for the token %q\n\texpected: %v\n\t actual: %v", + "failed NewBootstrapTokenString for the token %q\n\texpected: %v\n\t actual: %v\n\t diff: %v", rt.token, rt.bts, actual, + diff, ) } }) @@ -235,13 +238,14 @@ func TestNewBootstrapTokenStringFromIDAndSecret(t *testing.T) { rt.expectedError, err, ) - } else if !reflect.DeepEqual(actual, rt.bts) { + } else if diff := cmp.Diff(actual, rt.bts); diff != "" { t.Errorf( - "failed NewBootstrapTokenStringFromIDAndSecret for the token with id %q and secret %q\n\texpected: %v\n\t actual: %v", + "failed NewBootstrapTokenStringFromIDAndSecret for the token with id %q and secret %q\n\texpected: %v\n\t actual: %v\n\t diff: %v", rt.id, rt.secret, rt.bts, actual, + diff, ) } }) diff --git a/bootstrap/kubeadm/types/upstreamv1beta3/zz_generated.conversion.go b/bootstrap/kubeadm/types/upstreamv1beta3/zz_generated.conversion.go index d849616cb3de..0df0b3677e9d 100644 --- a/bootstrap/kubeadm/types/upstreamv1beta3/zz_generated.conversion.go +++ b/bootstrap/kubeadm/types/upstreamv1beta3/zz_generated.conversion.go @@ -1,5 +1,5 @@ -//go:build !ignore_autogenerated_kubeadm_types -// +build !ignore_autogenerated_kubeadm_types +//go:build !ignore_autogenerated +// +build !ignore_autogenerated /* Copyright The Kubernetes Authors. diff --git a/bootstrap/kubeadm/types/upstreamv1beta3/zz_generated.deepcopy.go b/bootstrap/kubeadm/types/upstreamv1beta3/zz_generated.deepcopy.go index 07902b44e68a..ded9eea14be3 100644 --- a/bootstrap/kubeadm/types/upstreamv1beta3/zz_generated.deepcopy.go +++ b/bootstrap/kubeadm/types/upstreamv1beta3/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* Copyright The Kubernetes Authors. @@ -24,7 +23,7 @@ package upstreamv1beta3 import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" + runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. diff --git a/bootstrap/kubeadm/types/utils.go b/bootstrap/kubeadm/types/utils.go index 020d3002da83..57505e7ec819 100644 --- a/bootstrap/kubeadm/types/utils.go +++ b/bootstrap/kubeadm/types/utils.go @@ -18,7 +18,7 @@ limitations under the License. 
package utils import ( - "github.com/blang/semver" + "github.com/blang/semver/v4" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -27,50 +27,41 @@ import ( "sigs.k8s.io/controller-runtime/pkg/scheme" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" - "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/upstreamv1beta1" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/upstreamv1beta2" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/upstreamv1beta3" "sigs.k8s.io/cluster-api/util/version" ) var ( - v1beta1KubeadmVersion = semver.MustParse("1.13.0") v1beta2KubeadmVersion = semver.MustParse("1.15.0") v1beta3KubeadmVersion = semver.MustParse("1.22.0") clusterConfigurationVersionTypeMap = map[schema.GroupVersion]conversion.Convertible{ upstreamv1beta3.GroupVersion: &upstreamv1beta3.ClusterConfiguration{}, upstreamv1beta2.GroupVersion: &upstreamv1beta2.ClusterConfiguration{}, - upstreamv1beta1.GroupVersion: &upstreamv1beta1.ClusterConfiguration{}, } clusterStatusVersionTypeMap = map[schema.GroupVersion]conversion.Convertible{ // ClusterStatus has been removed in v1beta3, so we don't need an entry for v1beta3 upstreamv1beta2.GroupVersion: &upstreamv1beta2.ClusterStatus{}, - upstreamv1beta1.GroupVersion: &upstreamv1beta1.ClusterStatus{}, } initConfigurationVersionTypeMap = map[schema.GroupVersion]conversion.Convertible{ upstreamv1beta3.GroupVersion: &upstreamv1beta3.InitConfiguration{}, upstreamv1beta2.GroupVersion: &upstreamv1beta2.InitConfiguration{}, - upstreamv1beta1.GroupVersion: &upstreamv1beta1.InitConfiguration{}, } joinConfigurationVersionTypeMap = map[schema.GroupVersion]conversion.Convertible{ upstreamv1beta3.GroupVersion: &upstreamv1beta3.JoinConfiguration{}, upstreamv1beta2.GroupVersion: &upstreamv1beta2.JoinConfiguration{}, - upstreamv1beta1.GroupVersion: &upstreamv1beta1.JoinConfiguration{}, } ) // KubeVersionToKubeadmAPIGroupVersion maps a Kubernetes version to the correct Kubeadm API Group supported. func KubeVersionToKubeadmAPIGroupVersion(v semver.Version) (schema.GroupVersion, error) { switch { - case version.Compare(v, v1beta1KubeadmVersion, version.WithoutPreReleases()) < 0: - return schema.GroupVersion{}, errors.New("the bootstrap provider for kubeadm doesn't support Kubernetes version lower than v1.13.0") case version.Compare(v, v1beta2KubeadmVersion, version.WithoutPreReleases()) < 0: - // NOTE: All the Kubernetes version >= v1.13 and < v1.15 should use the kubeadm API version v1beta1 - return upstreamv1beta1.GroupVersion, nil + return schema.GroupVersion{}, errors.New("the bootstrap provider for kubeadm doesn't support Kubernetes version lower than v1.15.0") case version.Compare(v, v1beta3KubeadmVersion, version.WithoutPreReleases()) < 0: // NOTE: All the Kubernetes version >= v1.15 and < v1.22 should use the kubeadm API version v1beta2 return upstreamv1beta2.GroupVersion, nil @@ -181,6 +172,26 @@ func UnmarshalClusterStatus(yaml string) (*bootstrapv1.ClusterStatus, error) { return obj, nil } +// UnmarshalInitConfiguration tries to translate a Kubeadm API yaml back to the InitConfiguration type. +// NOTE: The yaml could be any of the known formats for the kubeadm InitConfiguration type. 
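+// For example, the tests added below feed both of these (abbreviated) documents
+// through it, and each one decodes via its spoke type into the same hub object:
+//   "apiVersion: kubeadm.k8s.io/v1beta2\nkind: InitConfiguration\n..."
+//   "apiVersion: kubeadm.k8s.io/v1beta3\nkind: InitConfiguration\n..."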
+func UnmarshalInitConfiguration(yaml string) (*bootstrapv1.InitConfiguration, error) { + obj := &bootstrapv1.InitConfiguration{} + if err := unmarshalFromVersions(yaml, initConfigurationVersionTypeMap, obj); err != nil { + return nil, err + } + return obj, nil +} + +// UnmarshalJoinConfiguration tries to translate a Kubeadm API yaml back to the JoinConfiguration type. +// NOTE: The yaml could be any of the known formats for the kubeadm JoinConfiguration type. +func UnmarshalJoinConfiguration(yaml string) (*bootstrapv1.JoinConfiguration, error) { + obj := &bootstrapv1.JoinConfiguration{} + if err := unmarshalFromVersions(yaml, joinConfigurationVersionTypeMap, obj); err != nil { + return nil, err + } + return obj, nil +} + func unmarshalFromVersions(yaml string, kubeadmAPIVersions map[schema.GroupVersion]conversion.Convertible, capiObj conversion.Hub) error { // For each known kubeadm API version for gv, obj := range kubeadmAPIVersions { @@ -192,7 +203,8 @@ func unmarshalFromVersions(yaml string, kubeadmAPIVersions map[schema.GroupVersi return errors.Wrapf(err, "failed to build scheme for kubeadm types conversions") } - if _, _, err := codecs.UniversalDeserializer().Decode([]byte(yaml), &gvk, kubeadmObj); err == nil { + _, _, err = codecs.UniversalDeserializer().Decode([]byte(yaml), &gvk, kubeadmObj) + if err == nil { // If conversion worked, then converts the kubeadmObj (spoke) back to the Cluster API ClusterConfiguration type (hub). if err := kubeadmObj.(conversion.Convertible).ConvertTo(capiObj); err != nil { return errors.Wrapf(err, "failed to convert kubeadm types to Cluster API types") diff --git a/bootstrap/kubeadm/types/utils_test.go b/bootstrap/kubeadm/types/utils_test.go index 96a245420f31..49c9450b953c 100644 --- a/bootstrap/kubeadm/types/utils_test.go +++ b/bootstrap/kubeadm/types/utils_test.go @@ -19,13 +19,12 @@ package utils import ( "testing" - "github.com/blang/semver" + "github.com/blang/semver/v4" "github.com/google/go-cmp/cmp" . 
"github.com/onsi/gomega" "k8s.io/apimachinery/pkg/runtime/schema" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" - "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/upstreamv1beta1" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/upstreamv1beta2" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/upstreamv1beta3" ) @@ -49,28 +48,11 @@ func TestKubeVersionToKubeadmAPIGroupVersion(t *testing.T) { wantErr: true, }, { - name: "pass with minimum kubernetes alpha version for kubeadm API v1beta1", - args: args{ - version: semver.MustParse("1.13.0-alpha.0.734+ba502ee555924a"), - }, - want: upstreamv1beta1.GroupVersion, - wantErr: false, - }, - { - name: "pass with minimum kubernetes version for kubeadm API v1beta1", - args: args{ - version: semver.MustParse("1.13.0"), - }, - want: upstreamv1beta1.GroupVersion, - wantErr: false, - }, - { - name: "pass with kubernetes version for kubeadm API v1beta1", + name: "fails with kubernetes version for kubeadm API v1beta1", args: args{ version: semver.MustParse("1.14.99"), }, - want: upstreamv1beta1.GroupVersion, - wantErr: false, + wantErr: true, }, { name: "pass with minimum kubernetes alpha version for kubeadm API v1beta2", @@ -139,7 +121,7 @@ func TestKubeVersionToKubeadmAPIGroupVersion(t *testing.T) { return } g.Expect(err).ToNot(HaveOccurred()) - g.Expect(got).To(Equal(tt.want)) + g.Expect(got).To(BeComparableTo(tt.want)) }) } } @@ -155,22 +137,6 @@ func TestMarshalClusterConfigurationForVersion(t *testing.T) { want string wantErr bool }{ - { - name: "Generates a v1beta1 kubeadm configuration", - args: args{ - capiObj: &bootstrapv1.ClusterConfiguration{}, - version: semver.MustParse("1.14.9"), - }, - want: "apiServer: {}\n" + - "apiVersion: kubeadm.k8s.io/v1beta1\n" + "" + - "controllerManager: {}\n" + - "dns: {}\n" + - "etcd: {}\n" + - "kind: ClusterConfiguration\n" + - "networking: {}\n" + - "scheduler: {}\n", - wantErr: false, - }, { name: "Generates a v1beta2 kubeadm configuration", args: args{ @@ -230,17 +196,6 @@ func TestMarshalClusterStatusForVersion(t *testing.T) { want string wantErr bool }{ - { - name: "Generates a v1beta1 kubeadm status", - args: args{ - capiObj: &bootstrapv1.ClusterStatus{}, - version: semver.MustParse("1.14.9"), - }, - want: "apiEndpoints: null\n" + - "apiVersion: kubeadm.k8s.io/v1beta1\n" + "" + - "kind: ClusterStatus\n", - wantErr: false, - }, { name: "Generates a v1beta2 kubeadm status", args: args{ @@ -287,20 +242,6 @@ func TestMarshalInitConfigurationForVersion(t *testing.T) { want string wantErr bool }{ - { - name: "Generates a v1beta1 kubeadm configuration", - args: args{ - capiObj: &bootstrapv1.InitConfiguration{}, - version: semver.MustParse("1.14.9"), - }, - want: "apiVersion: kubeadm.k8s.io/v1beta1\n" + - "kind: InitConfiguration\n" + - "localAPIEndpoint:\n" + - " advertiseAddress: \"\"\n" + - " bindPort: 0\n" + - "nodeRegistration: {}\n", - wantErr: false, - }, { name: "Generates a v1beta2 kubeadm configuration", args: args{ @@ -365,18 +306,6 @@ func TestMarshalJoinConfigurationForVersion(t *testing.T) { want string wantErr bool }{ - { - name: "Generates a v1beta1 kubeadm configuration", - args: args{ - capiObj: &bootstrapv1.JoinConfiguration{}, - version: semver.MustParse("1.14.9"), - }, - want: "apiVersion: kubeadm.k8s.io/v1beta1\n" + "" + - "discovery: {}\n" + - "kind: JoinConfiguration\n" + - "nodeRegistration: {}\n", - wantErr: false, - }, { name: "Generates a v1beta2 kubeadm configuration", args: args{ @@ -440,21 +369,6 @@ func TestUnmarshalClusterConfiguration(t *testing.T) { want 
*bootstrapv1.ClusterConfiguration wantErr bool }{ - { - name: "Parses a v1beta1 kubeadm configuration", - args: args{ - yaml: "apiServer: {}\n" + - "apiVersion: kubeadm.k8s.io/v1beta1\n" + "" + - "controllerManager: {}\n" + - "dns: {}\n" + - "etcd: {}\n" + - "kind: ClusterConfiguration\n" + - "networking: {}\n" + - "scheduler: {}\n", - }, - want: &bootstrapv1.ClusterConfiguration{}, - wantErr: false, - }, { name: "Parses a v1beta2 kubeadm configuration", args: args{ @@ -496,7 +410,7 @@ func TestUnmarshalClusterConfiguration(t *testing.T) { return } g.Expect(err).ToNot(HaveOccurred()) - g.Expect(got).To(Equal(tt.want), cmp.Diff(tt.want, got)) + g.Expect(got).To(BeComparableTo(tt.want), cmp.Diff(tt.want, got)) }) } } @@ -511,21 +425,11 @@ func TestUnmarshalClusterStatus(t *testing.T) { want *bootstrapv1.ClusterStatus wantErr bool }{ - { - name: "Parses a v1beta1 kubeadm configuration", - args: args{ - yaml: "apiEndpoints: null\n" + - "apiVersion: kubeadm.k8s.io/v1beta1\n" + "" + - "kind: ClusterStatus\n", - }, - want: &bootstrapv1.ClusterStatus{}, - wantErr: false, - }, { name: "Parses a v1beta2 kubeadm configuration", args: args{ yaml: "apiEndpoints: null\n" + - "apiVersion: kubeadm.k8s.io/v1beta1\n" + "" + + "apiVersion: kubeadm.k8s.io/v1beta2\n" + "" + "kind: ClusterStatus\n", }, want: &bootstrapv1.ClusterStatus{}, @@ -551,7 +455,103 @@ func TestUnmarshalClusterStatus(t *testing.T) { return } g.Expect(err).ToNot(HaveOccurred()) - g.Expect(got).To(Equal(tt.want), cmp.Diff(tt.want, got)) + g.Expect(got).To(BeComparableTo(tt.want), cmp.Diff(tt.want, got)) + }) + } +} + +func TestUnmarshalInitConfiguration(t *testing.T) { + type args struct { + yaml string + } + tests := []struct { + name string + args args + want *bootstrapv1.InitConfiguration + wantErr bool + }{ + { + name: "Parses a v1beta2 kubeadm configuration", + args: args{ + yaml: "apiVersion: kubeadm.k8s.io/v1beta2\n" + "" + + "kind: InitConfiguration\n" + + "localAPIEndpoint: {}\n" + + "nodeRegistration: {}\n", + }, + want: &bootstrapv1.InitConfiguration{}, + wantErr: false, + }, + { + name: "Parses a v1beta3 kubeadm configuration", + args: args{ + yaml: "apiVersion: kubeadm.k8s.io/v1beta3\n" + "" + + "kind: InitConfiguration\n" + + "localAPIEndpoint: {}\n" + + "nodeRegistration: {}\n", + }, + want: &bootstrapv1.InitConfiguration{}, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + got, err := UnmarshalInitConfiguration(tt.args.yaml) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + return + } + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).To(BeComparableTo(tt.want), cmp.Diff(tt.want, got)) + }) + } +} + +func TestUnmarshalJoinConfiguration(t *testing.T) { + type args struct { + yaml string + } + tests := []struct { + name string + args args + want *bootstrapv1.JoinConfiguration + wantErr bool + }{ + { + name: "Parses a v1beta2 kubeadm configuration", + args: args{ + yaml: "apiVersion: kubeadm.k8s.io/v1beta2\n" + "" + + "caCertPath: \"\"\n" + + "discovery: {}\n" + + "kind: JoinConfiguration\n", + }, + want: &bootstrapv1.JoinConfiguration{}, + wantErr: false, + }, + { + name: "Parses a v1beta3 kubeadm configuration", + args: args{ + yaml: "apiVersion: kubeadm.k8s.io/v1beta3\n" + "" + + "caCertPath: \"\"\n" + + "discovery: {}\n" + + "kind: JoinConfiguration\n", + }, + want: &bootstrapv1.JoinConfiguration{}, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + got, err := 
UnmarshalJoinConfiguration(tt.args.yaml) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + return + } + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).To(BeComparableTo(tt.want), cmp.Diff(tt.want, got)) }) } } diff --git a/bootstrap/kubeadm/webhooks/alias.go b/bootstrap/kubeadm/webhooks/alias.go new file mode 100644 index 000000000000..72702072ad5a --- /dev/null +++ b/bootstrap/kubeadm/webhooks/alias.go @@ -0,0 +1,39 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhooks + +import ( + ctrl "sigs.k8s.io/controller-runtime" + + "sigs.k8s.io/cluster-api/bootstrap/kubeadm/internal/webhooks" +) + +// KubeadmConfig implements a validating and defaulting webhook for KubeadmConfig. +type KubeadmConfig struct{} + +// SetupWebhookWithManager sets up KubeadmConfig webhooks. +func (webhook *KubeadmConfig) SetupWebhookWithManager(mgr ctrl.Manager) error { + return (&webhooks.KubeadmConfig{}).SetupWebhookWithManager(mgr) +} + +// KubeadmConfigTemplate implements a validating and defaulting webhook for KubeadmConfigTemplate. +type KubeadmConfigTemplate struct{} + +// SetupWebhookWithManager sets up KubeadmConfigTemplate webhooks. +func (webhook *KubeadmConfigTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error { + return (&webhooks.KubeadmConfigTemplate{}).SetupWebhookWithManager(mgr) +} diff --git a/bootstrap/kubeadm/webhooks/doc.go b/bootstrap/kubeadm/webhooks/doc.go new file mode 100644 index 000000000000..571963a3ae0e --- /dev/null +++ b/bootstrap/kubeadm/webhooks/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package webhooks provides the validating webhooks for KubeadmConfig and KubeadmConfigTemplate. +package webhooks diff --git a/bootstrap/util/configowner.go b/bootstrap/util/configowner.go index d10fadd1b851..7ed7e2edbf24 100644 --- a/bootstrap/util/configowner.go +++ b/bootstrap/util/configowner.go @@ -24,7 +24,9 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -123,8 +125,20 @@ func (co ConfigOwner) KubernetesVersion() string { return version } -// GetConfigOwner returns the Unstructured object owning the current resource. 
+// GetConfigOwner returns the Unstructured object owning the current resource +// using the uncached unstructured client. For performance-sensitive uses, +// consider GetTypedConfigOwner. func GetConfigOwner(ctx context.Context, c client.Client, obj metav1.Object) (*ConfigOwner, error) { + return getConfigOwner(ctx, c, obj, GetOwnerByRef) +} + +// GetTypedConfigOwner returns the Unstructured object owning the current +// resource. The implementation ensures a typed client is used, so the objects are read from the cache. +func GetTypedConfigOwner(ctx context.Context, c client.Client, obj metav1.Object) (*ConfigOwner, error) { + return getConfigOwner(ctx, c, obj, GetTypedOwnerByRef) +} + +func getConfigOwner(ctx context.Context, c client.Client, obj metav1.Object, getFn func(context.Context, client.Client, *corev1.ObjectReference) (*ConfigOwner, error)) (*ConfigOwner, error) { allowedGKs := []schema.GroupKind{ { Group: clusterv1.GroupVersion.Group, @@ -148,7 +162,7 @@ func GetConfigOwner(ctx context.Context, c client.Client, obj metav1.Object) (*C for _, gk := range allowedGKs { if refGVK.Group == gk.Group && refGVK.Kind == gk.Kind { - return GetOwnerByRef(ctx, c, &corev1.ObjectReference{ + return getFn(ctx, c, &corev1.ObjectReference{ APIVersion: ref.APIVersion, Kind: ref.Kind, Name: ref.Name, @@ -168,3 +182,35 @@ func GetOwnerByRef(ctx context.Context, c client.Client, ref *corev1.ObjectRefer } return &ConfigOwner{obj}, nil } + +// GetTypedOwnerByRef finds and returns the owner by looking at the object +// reference. The implementation ensures a typed client is used, so the objects are read from the cache. +func GetTypedOwnerByRef(ctx context.Context, c client.Client, ref *corev1.ObjectReference) (*ConfigOwner, error) { + objGVK := ref.GroupVersionKind() + obj, err := c.Scheme().New(objGVK) + if err != nil { + return nil, errors.Wrapf(err, "failed to construct object of type %s", ref.GroupVersionKind()) + } + clientObj, ok := obj.(client.Object) + if !ok { + return nil, errors.Errorf("expected owner reference to refer to a client.Object, is actually %T", obj) + } + key := types.NamespacedName{ + Namespace: ref.Namespace, + Name: ref.Name, + } + err = c.Get(ctx, key, clientObj) + if err != nil { + return nil, err + } + + content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(clientObj) + if err != nil { + return nil, err + } + u := unstructured.Unstructured{} + u.SetUnstructuredContent(content) + u.SetGroupVersionKind(objGVK) + + return &ConfigOwner{&u}, nil +} diff --git a/bootstrap/util/configowner_test.go b/bootstrap/util/configowner_test.go index bdd9b767adbc..7263e9513bd6 100644 --- a/bootstrap/util/configowner_test.go +++ b/bootstrap/util/configowner_test.go @@ -17,6 +17,7 @@ limitations under the License. package util import ( + "context" "testing" . 
"github.com/onsi/gomega" @@ -24,7 +25,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -34,136 +36,146 @@ import ( ) func TestGetConfigOwner(t *testing.T) { - t.Run("should get the owner when present (Machine)", func(t *testing.T) { - g := NewWithT(t) - myMachine := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-machine", - Namespace: metav1.NamespaceDefault, - Labels: map[string]string{ - clusterv1.MachineControlPlaneLabel: "", + doTests := func(t *testing.T, getFn func(context.Context, client.Client, metav1.Object) (*ConfigOwner, error)) { + t.Helper() + + t.Run("should get the owner when present (Machine)", func(t *testing.T) { + g := NewWithT(t) + myMachine := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-machine", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + clusterv1.MachineControlPlaneLabel: "", + }, }, - }, - Spec: clusterv1.MachineSpec{ - ClusterName: "my-cluster", - Bootstrap: clusterv1.Bootstrap{ - DataSecretName: pointer.String("my-data-secret"), + Spec: clusterv1.MachineSpec{ + ClusterName: "my-cluster", + Bootstrap: clusterv1.Bootstrap{ + DataSecretName: ptr.To("my-data-secret"), + }, + Version: ptr.To("v1.19.6"), }, - Version: pointer.String("v1.19.6"), - }, - Status: clusterv1.MachineStatus{ - InfrastructureReady: true, - }, - } + Status: clusterv1.MachineStatus{ + InfrastructureReady: true, + }, + } - c := fake.NewClientBuilder().WithObjects(myMachine).Build() - obj := &bootstrapv1.KubeadmConfig{ - ObjectMeta: metav1.ObjectMeta{ - OwnerReferences: []metav1.OwnerReference{ - { - Kind: "Machine", - APIVersion: clusterv1.GroupVersion.String(), - Name: "my-machine", + c := fake.NewClientBuilder().WithObjects(myMachine).Build() + obj := &bootstrapv1.KubeadmConfig{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Machine", + APIVersion: clusterv1.GroupVersion.String(), + Name: "my-machine", + }, }, + Namespace: metav1.NamespaceDefault, + Name: "my-resource-owned-by-machine", }, - Namespace: metav1.NamespaceDefault, - Name: "my-resource-owned-by-machine", - }, - } - configOwner, err := GetConfigOwner(ctx, c, obj) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(configOwner).ToNot(BeNil()) - g.Expect(configOwner.ClusterName()).To(BeEquivalentTo("my-cluster")) - g.Expect(configOwner.IsInfrastructureReady()).To(BeTrue()) - g.Expect(configOwner.IsControlPlaneMachine()).To(BeTrue()) - g.Expect(configOwner.IsMachinePool()).To(BeFalse()) - g.Expect(configOwner.KubernetesVersion()).To(Equal("v1.19.6")) - g.Expect(*configOwner.DataSecretName()).To(BeEquivalentTo("my-data-secret")) - }) + } + configOwner, err := getFn(ctx, c, obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(configOwner).ToNot(BeNil()) + g.Expect(configOwner.ClusterName()).To(BeEquivalentTo("my-cluster")) + g.Expect(configOwner.IsInfrastructureReady()).To(BeTrue()) + g.Expect(configOwner.IsControlPlaneMachine()).To(BeTrue()) + g.Expect(configOwner.IsMachinePool()).To(BeFalse()) + g.Expect(configOwner.KubernetesVersion()).To(Equal("v1.19.6")) + g.Expect(*configOwner.DataSecretName()).To(BeEquivalentTo("my-data-secret")) + }) - t.Run("should get the owner when present (MachinePool)", func(t *testing.T) { - _ = 
feature.MutableGates.Set("MachinePool=true") + t.Run("should get the owner when present (MachinePool)", func(t *testing.T) { + _ = feature.MutableGates.Set("MachinePool=true") - g := NewWithT(t) - myPool := &expv1.MachinePool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-machine-pool", - Namespace: metav1.NamespaceDefault, - Labels: map[string]string{ - clusterv1.MachineControlPlaneLabel: "", + g := NewWithT(t) + myPool := &expv1.MachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-machine-pool", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + clusterv1.MachineControlPlaneLabel: "", + }, }, - }, - Spec: expv1.MachinePoolSpec{ - ClusterName: "my-cluster", - Template: clusterv1.MachineTemplateSpec{ - Spec: clusterv1.MachineSpec{ - Version: pointer.String("v1.19.6"), + Spec: expv1.MachinePoolSpec{ + ClusterName: "my-cluster", + Template: clusterv1.MachineTemplateSpec{ + Spec: clusterv1.MachineSpec{ + Version: ptr.To("v1.19.6"), + }, }, }, - }, - Status: expv1.MachinePoolStatus{ - InfrastructureReady: true, - }, - } + Status: expv1.MachinePoolStatus{ + InfrastructureReady: true, + }, + } - c := fake.NewClientBuilder().WithObjects(myPool).Build() - obj := &bootstrapv1.KubeadmConfig{ - ObjectMeta: metav1.ObjectMeta{ - OwnerReferences: []metav1.OwnerReference{ - { - Kind: "MachinePool", - APIVersion: expv1.GroupVersion.String(), - Name: "my-machine-pool", + c := fake.NewClientBuilder().WithObjects(myPool).Build() + obj := &bootstrapv1.KubeadmConfig{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "MachinePool", + APIVersion: expv1.GroupVersion.String(), + Name: "my-machine-pool", + }, }, + Namespace: metav1.NamespaceDefault, + Name: "my-resource-owned-by-machine-pool", }, - Namespace: metav1.NamespaceDefault, - Name: "my-resource-owned-by-machine-pool", - }, - } - configOwner, err := GetConfigOwner(ctx, c, obj) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(configOwner).ToNot(BeNil()) - g.Expect(configOwner.ClusterName()).To(BeEquivalentTo("my-cluster")) - g.Expect(configOwner.IsInfrastructureReady()).To(BeTrue()) - g.Expect(configOwner.IsControlPlaneMachine()).To(BeFalse()) - g.Expect(configOwner.IsMachinePool()).To(BeTrue()) - g.Expect(configOwner.KubernetesVersion()).To(Equal("v1.19.6")) - g.Expect(configOwner.DataSecretName()).To(BeNil()) - }) + } + configOwner, err := getFn(ctx, c, obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(configOwner).ToNot(BeNil()) + g.Expect(configOwner.ClusterName()).To(BeEquivalentTo("my-cluster")) + g.Expect(configOwner.IsInfrastructureReady()).To(BeTrue()) + g.Expect(configOwner.IsControlPlaneMachine()).To(BeFalse()) + g.Expect(configOwner.IsMachinePool()).To(BeTrue()) + g.Expect(configOwner.KubernetesVersion()).To(Equal("v1.19.6")) + g.Expect(configOwner.DataSecretName()).To(BeNil()) + }) - t.Run("return an error when not found", func(t *testing.T) { - g := NewWithT(t) - c := fake.NewClientBuilder().Build() - obj := &bootstrapv1.KubeadmConfig{ - ObjectMeta: metav1.ObjectMeta{ - OwnerReferences: []metav1.OwnerReference{ - { - Kind: "Machine", - APIVersion: clusterv1.GroupVersion.String(), - Name: "my-machine", + t.Run("return an error when not found", func(t *testing.T) { + g := NewWithT(t) + c := fake.NewClientBuilder().Build() + obj := &bootstrapv1.KubeadmConfig{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Machine", + APIVersion: clusterv1.GroupVersion.String(), + Name: "my-machine", + }, }, + Namespace: metav1.NamespaceDefault, + Name: 
"my-resource-owned-by-machine", }, - Namespace: metav1.NamespaceDefault, - Name: "my-resource-owned-by-machine", - }, - } - _, err := GetConfigOwner(ctx, c, obj) - g.Expect(err).To(HaveOccurred()) - }) + } + _, err := getFn(ctx, c, obj) + g.Expect(err).To(HaveOccurred()) + }) - t.Run("return nothing when there is no owner", func(t *testing.T) { - g := NewWithT(t) - c := fake.NewClientBuilder().Build() - obj := &bootstrapv1.KubeadmConfig{ - ObjectMeta: metav1.ObjectMeta{ - OwnerReferences: []metav1.OwnerReference{}, - Namespace: metav1.NamespaceDefault, - Name: "my-resource-owned-by-machine", - }, - } - configOwner, err := GetConfigOwner(ctx, c, obj) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(configOwner).To(BeNil()) + t.Run("return nothing when there is no owner", func(t *testing.T) { + g := NewWithT(t) + c := fake.NewClientBuilder().Build() + obj := &bootstrapv1.KubeadmConfig{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{}, + Namespace: metav1.NamespaceDefault, + Name: "my-resource-owned-by-machine", + }, + } + configOwner, err := getFn(ctx, c, obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(configOwner).To(BeNil()) + }) + } + t.Run("uncached", func(t *testing.T) { + doTests(t, GetConfigOwner) + }) + t.Run("cached", func(t *testing.T) { + doTests(t, GetTypedConfigOwner) }) } @@ -172,7 +184,8 @@ func TestHasNodeRefs(t *testing.T) { g := NewWithT(t) machine := &clusterv1.Machine{ TypeMeta: metav1.TypeMeta{ - Kind: "Machine", + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Machine", }, ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -193,7 +206,8 @@ func TestHasNodeRefs(t *testing.T) { g := NewWithT(t) machine := &clusterv1.Machine{ TypeMeta: metav1.TypeMeta{ - Kind: "Machine", + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Machine", }, ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -225,7 +239,8 @@ func TestHasNodeRefs(t *testing.T) { { // No replicas specified (default is 1). No nodeRefs either. 
TypeMeta: metav1.TypeMeta{ - Kind: "MachinePool", + APIVersion: expv1.GroupVersion.String(), + Kind: "MachinePool", }, ObjectMeta: metav1.ObjectMeta{ Namespace: metav1.NamespaceDefault, @@ -235,27 +250,29 @@ func TestHasNodeRefs(t *testing.T) { { // 1 replica but no nodeRefs TypeMeta: metav1.TypeMeta{ - Kind: "MachinePool", + APIVersion: expv1.GroupVersion.String(), + Kind: "MachinePool", }, ObjectMeta: metav1.ObjectMeta{ Namespace: metav1.NamespaceDefault, Name: "machine-pool-name", }, Spec: expv1.MachinePoolSpec{ - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), }, }, { // 2 replicas but only 1 nodeRef TypeMeta: metav1.TypeMeta{ - Kind: "MachinePool", + APIVersion: expv1.GroupVersion.String(), + Kind: "MachinePool", }, ObjectMeta: metav1.ObjectMeta{ Namespace: metav1.NamespaceDefault, Name: "machine-pool-name", }, Spec: expv1.MachinePoolSpec{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, Status: expv1.MachinePoolStatus{ NodeRefs: []corev1.ObjectReference{ @@ -288,7 +305,8 @@ func TestHasNodeRefs(t *testing.T) { { // 1 replica (default) and 1 nodeRef TypeMeta: metav1.TypeMeta{ - Kind: "MachinePool", + APIVersion: expv1.GroupVersion.String(), + Kind: "MachinePool", }, ObjectMeta: metav1.ObjectMeta{ Namespace: metav1.NamespaceDefault, @@ -307,14 +325,15 @@ func TestHasNodeRefs(t *testing.T) { { // 2 replicas and nodeRefs TypeMeta: metav1.TypeMeta{ - Kind: "MachinePool", + APIVersion: expv1.GroupVersion.String(), + Kind: "MachinePool", }, ObjectMeta: metav1.ObjectMeta{ Namespace: metav1.NamespaceDefault, Name: "machine-pool-name", }, Spec: expv1.MachinePoolSpec{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, Status: expv1.MachinePoolStatus{ NodeRefs: []corev1.ObjectReference{ @@ -334,14 +353,15 @@ func TestHasNodeRefs(t *testing.T) { { // 0 replicas and 0 nodeRef TypeMeta: metav1.TypeMeta{ - Kind: "MachinePool", + APIVersion: expv1.GroupVersion.String(), + Kind: "MachinePool", }, ObjectMeta: metav1.ObjectMeta{ Namespace: metav1.NamespaceDefault, Name: "machine-pool-name", }, Spec: expv1.MachinePoolSpec{ - Replicas: pointer.Int32(0), + Replicas: ptr.To[int32](0), }, }, } diff --git a/cloudbuild-nightly.yaml b/cloudbuild-nightly.yaml index bc5d5ff32842..a99bedf039fd 100644 --- a/cloudbuild-nightly.yaml +++ b/cloudbuild-nightly.yaml @@ -3,9 +3,9 @@ timeout: 2700s options: substitution_option: ALLOW_LOOSE - machineType: 'N1_HIGHCPU_8' + machineType: 'E2_HIGHCPU_8' steps: - - name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20221007-ad65926f6b' + - name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud@sha256:de53ba7cd20326776a00adb065430a8bb51beaf24876ffcbd4e8f71b74dbc22d' # v20240210-29014a6e3a entrypoint: make env: - DOCKER_CLI_EXPERIMENTAL=enabled diff --git a/cloudbuild.yaml b/cloudbuild.yaml index 81f3f4facbc1..6c11dbf236b0 100644 --- a/cloudbuild.yaml +++ b/cloudbuild.yaml @@ -3,19 +3,18 @@ timeout: 2700s options: substitution_option: ALLOW_LOOSE - machineType: 'N1_HIGHCPU_8' + machineType: 'E2_HIGHCPU_8' steps: - - name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20221007-ad65926f6b' + - name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud@sha256:de53ba7cd20326776a00adb065430a8bb51beaf24876ffcbd4e8f71b74dbc22d' # v20240210-29014a6e3a entrypoint: make env: - DOCKER_CLI_EXPERIMENTAL=enabled - TAG=$_GIT_TAG - PULL_BASE_REF=$_PULL_BASE_REF - DOCKER_BUILDKIT=1 - args: - - release-staging + args: ['release-staging', '-j', '8', '-O'] substitutions: # _GIT_TAG will be filled with a git-based tag for the image, of the form vYYYYMMDD-hash, 
and # can be used as a substitution _GIT_TAG: '12345' - _PULL_BASE_REF: 'dev' + _PULL_BASE_REF: 'dev' \ No newline at end of file diff --git a/cmd/clusterctl/OWNERS b/cmd/clusterctl/OWNERS index 775eb13264ec..a0b401eef881 100644 --- a/cmd/clusterctl/OWNERS +++ b/cmd/clusterctl/OWNERS @@ -5,4 +5,4 @@ approvers: reviewers: - cluster-api-reviewers - - cluster-api-clusterctl-reviewers + - cluster-api-clusterctl-reviewers \ No newline at end of file diff --git a/cmd/clusterctl/api/.import-restrictions b/cmd/clusterctl/api/.import-restrictions new file mode 100644 index 000000000000..a2e1dfd08133 --- /dev/null +++ b/cmd/clusterctl/api/.import-restrictions @@ -0,0 +1,5 @@ +rules: + - selectorRegexp: sigs[.]k8s[.]io/controller-runtime + allowedPrefixes: [] + forbiddenPrefixes: + - "sigs.k8s.io/controller-runtime" diff --git a/cmd/clusterctl/api/v1alpha3/annotations.go b/cmd/clusterctl/api/v1alpha3/annotations.go index 26db20fc67e7..a9436dec3fc2 100644 --- a/cmd/clusterctl/api/v1alpha3/annotations.go +++ b/cmd/clusterctl/api/v1alpha3/annotations.go @@ -25,4 +25,20 @@ const ( // Note: Only CRDs that are referenced by core Cluster API CRDs have to comply with the naming scheme. // See the following issue for more information: https://github.com/kubernetes-sigs/cluster-api/issues/5686#issuecomment-1260897278 SkipCRDNamePreflightCheckAnnotation = "clusterctl.cluster.x-k8s.io/skip-crd-name-preflight-check" + + // DeleteForMoveAnnotation will be set on objects that are going to be deleted from the + // source cluster after being moved to the target cluster during the clusterctl move operation. + // + // It will help any validation webhook to make decisions based on it. + DeleteForMoveAnnotation = "clusterctl.cluster.x-k8s.io/delete-for-move" + + // BlockMoveAnnotation prevents the cluster move operation from starting if it is defined on at least one + // of the objects in scope. + // Provider controllers are expected to set the annotation on resources that cannot be instantaneously + // paused and remove the annotation when the resource has been actually paused. + // + // e.g. If this annotation is defined with any value on an InfraMachine resource to be moved when + // `clusterctl move` is invoked, then NO resources for ANY workload cluster will be created on the + // destination management cluster until the annotation is removed. + BlockMoveAnnotation = "clusterctl.cluster.x-k8s.io/block-move" ) diff --git a/cmd/clusterctl/api/v1alpha3/groupversion_info.go b/cmd/clusterctl/api/v1alpha3/groupversion_info.go index b5510698cde6..dffa6182d066 100644 --- a/cmd/clusterctl/api/v1alpha3/groupversion_info.go +++ b/cmd/clusterctl/api/v1alpha3/groupversion_info.go @@ -20,17 +20,26 @@ limitations under the License. package v1alpha3 import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" ) var ( // GroupVersion is group version used to register these objects. GroupVersion = schema.GroupVersion{Group: "clusterctl.cluster.x-k8s.io", Version: "v1alpha3"} - // SchemeBuilder is used to add go types to the GroupVersionKind scheme. - SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + // schemeBuilder is used to add go types to the GroupVersionKind scheme. + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) // AddToScheme adds the types in this group-version to the given scheme. 
- AddToScheme = SchemeBuilder.AddToScheme + AddToScheme = schemeBuilder.AddToScheme + + objectTypes = []runtime.Object{} ) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, objectTypes...) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/cmd/clusterctl/api/v1alpha3/labels.go b/cmd/clusterctl/api/v1alpha3/labels.go index 5b848c29c65c..a6dacf143ccd 100644 --- a/cmd/clusterctl/api/v1alpha3/labels.go +++ b/cmd/clusterctl/api/v1alpha3/labels.go @@ -54,6 +54,8 @@ func ManifestLabel(name string, providerType ProviderType) string { return fmt.Sprintf("ipam-%s", name) case RuntimeExtensionProviderType: return fmt.Sprintf("runtime-extension-%s", name) + case AddonProviderType: + return fmt.Sprintf("addon-%s", name) default: return name } diff --git a/cmd/clusterctl/api/v1alpha3/metadata_type.go b/cmd/clusterctl/api/v1alpha3/metadata_type.go index 0eacecbf4346..e2ab85a630ba 100644 --- a/cmd/clusterctl/api/v1alpha3/metadata_type.go +++ b/cmd/clusterctl/api/v1alpha3/metadata_type.go @@ -17,7 +17,7 @@ limitations under the License. package v1alpha3 import ( - "github.com/blang/semver" + "github.com/blang/semver/v4" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/version" ) @@ -54,7 +54,7 @@ func (rs ReleaseSeries) newer(release ReleaseSeries) bool { } func init() { - SchemeBuilder.Register(&Metadata{}) + objectTypes = append(objectTypes, &Metadata{}) } // GetReleaseSeriesForVersion returns the release series for a given version. diff --git a/cmd/clusterctl/api/v1alpha3/metadata_type_test.go b/cmd/clusterctl/api/v1alpha3/metadata_type_test.go index ce71a64343a9..6aaa734bd294 100644 --- a/cmd/clusterctl/api/v1alpha3/metadata_type_test.go +++ b/cmd/clusterctl/api/v1alpha3/metadata_type_test.go @@ -65,7 +65,7 @@ func TestGetReleaseSeriesForContract(t *testing.T) { g := NewWithT(t) m := &Metadata{ReleaseSeries: test.releaseSeries} - g.Expect(m.GetReleaseSeriesForContract(test.contract)).To(Equal(test.expectedReleaseSeries)) + g.Expect(m.GetReleaseSeriesForContract(test.contract)).To(BeComparableTo(test.expectedReleaseSeries)) }) } } diff --git a/cmd/clusterctl/api/v1alpha3/provider_type.go b/cmd/clusterctl/api/v1alpha3/provider_type.go index ce5b2bce8d64..0c923646d659 100644 --- a/cmd/clusterctl/api/v1alpha3/provider_type.go +++ b/cmd/clusterctl/api/v1alpha3/provider_type.go @@ -50,7 +50,7 @@ type Provider struct { // WatchedNamespace indicates the namespace where the provider controller is watching. // If empty the provider controller is watching for objects in all namespaces. // - // Deprecated: in clusterctl v1alpha4 all the providers watch all the namespaces; this field will be removed in a future version of this API + // Deprecated: providers complying with the Cluster API v1alpha4 contract or above must watch all namespaces; this field will be removed in a future version of this API // +optional WatchedNamespace string `json:"watchedNamespace,omitempty"` } @@ -95,7 +95,8 @@ func (p *Provider) GetProviderType() ProviderType { InfrastructureProviderType, ControlPlaneProviderType, IPAMProviderType, - RuntimeExtensionProviderType: + RuntimeExtensionProviderType, + AddonProviderType: return t default: return ProviderTypeUnknown @@ -129,6 +130,10 @@ const ( // runtime extensions. RuntimeExtensionProviderType = ProviderType("RuntimeExtensionProvider") + // AddonProviderType is the type associated with codebases that provide + // add-on capabilities. 
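+	// Manifest labels for this type use the "addon-" prefix (see ManifestLabel
+	// above), and it sorts after all other known provider types (see Order below).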
+ AddonProviderType = ProviderType("AddonProvider") + // ProviderTypeUnknown is used when the type is unknown. ProviderTypeUnknown = ProviderType("") ) @@ -148,6 +153,8 @@ func (p ProviderType) Order() int { return 4 case RuntimeExtensionProviderType: return 5 + case AddonProviderType: + return 6 default: return 99 } @@ -215,5 +222,5 @@ func (l *ProviderList) filterBy(predicate func(p Provider) bool) []Provider { } func init() { - SchemeBuilder.Register(&Provider{}, &ProviderList{}) + objectTypes = append(objectTypes, &Provider{}, &ProviderList{}) } diff --git a/cmd/clusterctl/api/v1alpha3/zz_generated.deepcopy.go b/cmd/clusterctl/api/v1alpha3/zz_generated.deepcopy.go index aa0625d17fd4..47ff72966254 100644 --- a/cmd/clusterctl/api/v1alpha3/zz_generated.deepcopy.go +++ b/cmd/clusterctl/api/v1alpha3/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* Copyright The Kubernetes Authors. @@ -22,7 +21,7 @@ limitations under the License. package v1alpha3 import ( - runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. diff --git a/cmd/clusterctl/client/alpha/client.go b/cmd/clusterctl/client/alpha/client.go index c72386b650dc..0f4bd230a1c3 100644 --- a/cmd/clusterctl/client/alpha/client.go +++ b/cmd/clusterctl/client/alpha/client.go @@ -16,12 +16,6 @@ limitations under the License. package alpha -import "context" - -var ( - ctx = context.TODO() -) - // Client is the alpha client. type Client interface { Rollout() Rollout diff --git a/cmd/clusterctl/client/alpha/kubeadmcontrolplane.go b/cmd/clusterctl/client/alpha/kubeadmcontrolplane.go index bbfe840a1c46..0d8c80e858a7 100644 --- a/cmd/clusterctl/client/alpha/kubeadmcontrolplane.go +++ b/cmd/clusterctl/client/alpha/kubeadmcontrolplane.go @@ -17,6 +17,7 @@ limitations under the License. package alpha import ( + "context" "fmt" "time" @@ -29,9 +30,9 @@ import ( ) // getKubeadmControlPlane retrieves the KubeadmControlPlane object corresponding to the name and namespace specified. -func getKubeadmControlPlane(proxy cluster.Proxy, name, namespace string) (*controlplanev1.KubeadmControlPlane, error) { +func getKubeadmControlPlane(ctx context.Context, proxy cluster.Proxy, name, namespace string) (*controlplanev1.KubeadmControlPlane, error) { kcpObj := &controlplanev1.KubeadmControlPlane{} - c, err := proxy.NewClient() + c, err := proxy.NewClient(ctx) if err != nil { return nil, err } @@ -46,15 +47,15 @@ func getKubeadmControlPlane(proxy cluster.Proxy, name, namespace string) (*contr return kcpObj, nil } -// setRolloutAfter sets KubeadmControlPlane.spec.rolloutAfter. -func setRolloutAfter(proxy cluster.Proxy, name, namespace string) error { +// setRolloutAfterOnKCP sets KubeadmControlPlane.spec.rolloutAfter. +func setRolloutAfterOnKCP(ctx context.Context, proxy cluster.Proxy, name, namespace string) error { patch := client.RawPatch(types.MergePatchType, []byte(fmt.Sprintf(`{"spec":{"rolloutAfter":"%v"}}`, time.Now().Format(time.RFC3339)))) - return patchKubeadmControlPlane(proxy, name, namespace, patch) + return patchKubeadmControlPlane(ctx, proxy, name, namespace, patch) } // patchKubeadmControlPlane applies a patch to a KubeadmControlPlane. 
-func patchKubeadmControlPlane(proxy cluster.Proxy, name, namespace string, patch client.Patch) error { - cFrom, err := proxy.NewClient() +func patchKubeadmControlPlane(ctx context.Context, proxy cluster.Proxy, name, namespace string, patch client.Patch) error { + cFrom, err := proxy.NewClient(ctx) if err != nil { return err } diff --git a/cmd/clusterctl/client/alpha/machinedeployment.go b/cmd/clusterctl/client/alpha/machinedeployment.go index f5e66d21c5e0..dbbaac049b6c 100644 --- a/cmd/clusterctl/client/alpha/machinedeployment.go +++ b/cmd/clusterctl/client/alpha/machinedeployment.go @@ -17,6 +17,7 @@ limitations under the License. package alpha import ( + "context" "fmt" "strconv" "time" @@ -27,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -35,9 +37,9 @@ import ( ) // getMachineDeployment retrieves the MachineDeployment object corresponding to the name and namespace specified. -func getMachineDeployment(proxy cluster.Proxy, name, namespace string) (*clusterv1.MachineDeployment, error) { +func getMachineDeployment(ctx context.Context, proxy cluster.Proxy, name, namespace string) (*clusterv1.MachineDeployment, error) { mdObj := &clusterv1.MachineDeployment{} - c, err := proxy.NewClient() + c, err := proxy.NewClient(ctx) if err != nil { return nil, err } @@ -52,15 +54,15 @@ func getMachineDeployment(proxy cluster.Proxy, name, namespace string) (*cluster return mdObj, nil } -// setRestartedAtAnnotation sets the restartedAt annotation in the MachineDeployment's spec.template.objectmeta. -func setRestartedAtAnnotation(proxy cluster.Proxy, name, namespace string) error { - patch := client.RawPatch(types.MergePatchType, []byte(fmt.Sprintf(`{"spec":{"template":{"metadata":{"annotations":{"cluster.x-k8s.io/restartedAt":"%v"}}}}}`, time.Now().Format(time.RFC3339)))) - return patchMachineDeployment(proxy, name, namespace, patch) +// setRolloutAfterOnMachineDeployment sets MachineDeployment.spec.rolloutAfter. +func setRolloutAfterOnMachineDeployment(ctx context.Context, proxy cluster.Proxy, name, namespace string) error { + patch := client.RawPatch(types.MergePatchType, []byte(fmt.Sprintf(`{"spec":{"rolloutAfter":"%v"}}`, time.Now().Format(time.RFC3339)))) + return patchMachineDeployment(ctx, proxy, name, namespace, patch) } // patchMachineDeployment applies a patch to a machinedeployment. -func patchMachineDeployment(proxy cluster.Proxy, name, namespace string, patch client.Patch) error { - cFrom, err := proxy.NewClient() +func patchMachineDeployment(ctx context.Context, proxy cluster.Proxy, name, namespace string, patch client.Patch) error { + cFrom, err := proxy.NewClient(ctx) if err != nil { return err } @@ -118,41 +120,42 @@ func findMachineDeploymentRevision(toRevision int64, allMSs []*clusterv1.Machine } // getMachineSetsForDeployment returns a list of MachineSets associated with a MachineDeployment. -func getMachineSetsForDeployment(proxy cluster.Proxy, d *clusterv1.MachineDeployment) ([]*clusterv1.MachineSet, error) { +func getMachineSetsForDeployment(ctx context.Context, proxy cluster.Proxy, md *clusterv1.MachineDeployment) ([]*clusterv1.MachineSet, error) { log := logf.Log - c, err := proxy.NewClient() + c, err := proxy.NewClient(ctx) if err != nil { return nil, err } // List all MachineSets to find those we own but that no longer match our selector. 
machineSets := &clusterv1.MachineSetList{} - if err := c.List(ctx, machineSets, client.InNamespace(d.Namespace)); err != nil { + if err := c.List(ctx, machineSets, client.InNamespace(md.Namespace)); err != nil { return nil, err } filtered := make([]*clusterv1.MachineSet, 0, len(machineSets.Items)) for idx := range machineSets.Items { ms := &machineSets.Items[idx] + log := log.WithValues("MachineSet", klog.KObj(ms)) // Skip this MachineSet if its controller ref is not pointing to this MachineDeployment - if !metav1.IsControlledBy(ms, d) { - log.V(5).Info("Skipping MachineSet, controller ref does not match MachineDeployment", "machineset", ms.Name) + if !metav1.IsControlledBy(ms, md) { + log.V(5).Info("Skipping MachineSet, controller ref does not match MachineDeployment") continue } - selector, err := metav1.LabelSelectorAsSelector(&d.Spec.Selector) + selector, err := metav1.LabelSelectorAsSelector(&md.Spec.Selector) if err != nil { - log.V(5).Info("Skipping MachineSet, failed to get label selector from spec selector", "machineset", ms.Name) + log.V(5).Info("Skipping MachineSet, failed to get label selector from spec selector") continue } // If a MachineDeployment with a nil or empty selector creeps in, it should match nothing, not everything. if selector.Empty() { - log.V(5).Info("Skipping MachineSet as the selector is empty", "machineset", ms.Name) + log.V(5).Info("Skipping MachineSet as the selector is empty") continue } // Skip this MachineSet if selector does not match if !selector.Matches(labels.Set(ms.Labels)) { - log.V(5).Info("Skipping MachineSet, label mismatch", "machineset", ms.Name) + log.V(5).Info("Skipping MachineSet, label mismatch") continue } filtered = append(filtered, ms) diff --git a/cmd/clusterctl/client/alpha/rollout.go b/cmd/clusterctl/client/alpha/rollout.go index 0e67bba75dd5..8736ae79df0d 100644 --- a/cmd/clusterctl/client/alpha/rollout.go +++ b/cmd/clusterctl/client/alpha/rollout.go @@ -17,6 +17,8 @@ limitations under the License. package alpha import ( + "context" + corev1 "k8s.io/api/core/v1" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster" @@ -40,10 +42,10 @@ var validRollbackResourceTypes = []string{ // Rollout defines the behavior of a rollout implementation. type Rollout interface { - ObjectRestarter(cluster.Proxy, corev1.ObjectReference) error - ObjectPauser(cluster.Proxy, corev1.ObjectReference) error - ObjectResumer(cluster.Proxy, corev1.ObjectReference) error - ObjectRollbacker(cluster.Proxy, corev1.ObjectReference, int64) error + ObjectRestarter(context.Context, cluster.Proxy, corev1.ObjectReference) error + ObjectPauser(context.Context, cluster.Proxy, corev1.ObjectReference) error + ObjectResumer(context.Context, cluster.Proxy, corev1.ObjectReference) error + ObjectRollbacker(context.Context, cluster.Proxy, corev1.ObjectReference, int64) error } var _ Rollout = &rollout{} diff --git a/cmd/clusterctl/client/alpha/rollout_pauser.go b/cmd/clusterctl/client/alpha/rollout_pauser.go index e6407c043a6f..582c2cb7409d 100644 --- a/cmd/clusterctl/client/alpha/rollout_pauser.go +++ b/cmd/clusterctl/client/alpha/rollout_pauser.go @@ -17,6 +17,7 @@ limitations under the License. package alpha import ( + "context" "fmt" "github.com/pkg/errors" @@ -30,28 +31,28 @@ import ( ) // ObjectPauser will issue a pause on the specified cluster-api resource. 
-func (r *rollout) ObjectPauser(proxy cluster.Proxy, ref corev1.ObjectReference) error { +func (r *rollout) ObjectPauser(ctx context.Context, proxy cluster.Proxy, ref corev1.ObjectReference) error { switch ref.Kind { case MachineDeployment: - deployment, err := getMachineDeployment(proxy, ref.Name, ref.Namespace) + deployment, err := getMachineDeployment(ctx, proxy, ref.Name, ref.Namespace) if err != nil || deployment == nil { return errors.Wrapf(err, "failed to fetch %v/%v", ref.Kind, ref.Name) } if deployment.Spec.Paused { return errors.Errorf("MachineDeployment is already paused: %v/%v\n", ref.Kind, ref.Name) //nolint:revive // MachineDeployment is intentionally capitalized. } - if err := pauseMachineDeployment(proxy, ref.Name, ref.Namespace); err != nil { + if err := pauseMachineDeployment(ctx, proxy, ref.Name, ref.Namespace); err != nil { return err } case KubeadmControlPlane: - kcp, err := getKubeadmControlPlane(proxy, ref.Name, ref.Namespace) + kcp, err := getKubeadmControlPlane(ctx, proxy, ref.Name, ref.Namespace) if err != nil || kcp == nil { return errors.Wrapf(err, "failed to fetch %v/%v", ref.Kind, ref.Name) } if annotations.HasPaused(kcp.GetObjectMeta()) { return errors.Errorf("KubeadmControlPlane is already paused: %v/%v\n", ref.Kind, ref.Name) //nolint:revive // KubeadmControlPlane is intentionally capitalized. } - if err := pauseKubeadmControlPlane(proxy, ref.Name, ref.Namespace); err != nil { + if err := pauseKubeadmControlPlane(ctx, proxy, ref.Name, ref.Namespace); err != nil { return err } default: @@ -61,13 +62,13 @@ func (r *rollout) ObjectPauser(proxy cluster.Proxy, ref corev1.ObjectReference) } // pauseMachineDeployment sets Paused to true in the MachineDeployment's spec. -func pauseMachineDeployment(proxy cluster.Proxy, name, namespace string) error { +func pauseMachineDeployment(ctx context.Context, proxy cluster.Proxy, name, namespace string) error { patch := client.RawPatch(types.MergePatchType, []byte(fmt.Sprintf("{\"spec\":{\"paused\":%t}}", true))) - return patchMachineDeployment(proxy, name, namespace, patch) + return patchMachineDeployment(ctx, proxy, name, namespace, patch) } // pauseKubeadmControlPlane sets paused annotation to true. -func pauseKubeadmControlPlane(proxy cluster.Proxy, name, namespace string) error { +func pauseKubeadmControlPlane(ctx context.Context, proxy cluster.Proxy, name, namespace string) error { patch := client.RawPatch(types.MergePatchType, []byte(fmt.Sprintf("{\"metadata\":{\"annotations\":{%q: \"%t\"}}}", clusterv1.PausedAnnotation, true))) - return patchKubeadmControlPlane(proxy, name, namespace, patch) + return patchKubeadmControlPlane(ctx, proxy, name, namespace, patch) } diff --git a/cmd/clusterctl/client/alpha/rollout_pauser_test.go b/cmd/clusterctl/client/alpha/rollout_pauser_test.go index 35e6a3622371..ae5d0b939e6e 100644 --- a/cmd/clusterctl/client/alpha/rollout_pauser_test.go +++ b/cmd/clusterctl/client/alpha/rollout_pauser_test.go @@ -146,14 +146,14 @@ func Test_ObjectPauser(t *testing.T) { g := NewWithT(t) r := newRolloutClient() proxy := test.NewFakeProxy().WithObjs(tt.fields.objs...) 
- err := r.ObjectPauser(proxy, tt.fields.ref) + err := r.ObjectPauser(context.Background(), proxy, tt.fields.ref) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } g.Expect(err).ToNot(HaveOccurred()) for _, obj := range tt.fields.objs { - cl, err := proxy.NewClient() + cl, err := proxy.NewClient(context.Background()) g.Expect(err).ToNot(HaveOccurred()) key := client.ObjectKeyFromObject(obj) switch obj.(type) { diff --git a/cmd/clusterctl/client/alpha/rollout_restarter.go b/cmd/clusterctl/client/alpha/rollout_restarter.go index 9928b928e45d..d16392d5591a 100644 --- a/cmd/clusterctl/client/alpha/rollout_restarter.go +++ b/cmd/clusterctl/client/alpha/rollout_restarter.go @@ -17,6 +17,7 @@ limitations under the License. package alpha import ( + "context" "time" "github.com/pkg/errors" @@ -27,21 +28,24 @@ import ( ) // ObjectRestarter will issue a restart on the specified cluster-api resource. -func (r *rollout) ObjectRestarter(proxy cluster.Proxy, ref corev1.ObjectReference) error { +func (r *rollout) ObjectRestarter(ctx context.Context, proxy cluster.Proxy, ref corev1.ObjectReference) error { switch ref.Kind { case MachineDeployment: - deployment, err := getMachineDeployment(proxy, ref.Name, ref.Namespace) + deployment, err := getMachineDeployment(ctx, proxy, ref.Name, ref.Namespace) if err != nil || deployment == nil { return errors.Wrapf(err, "failed to fetch %v/%v", ref.Kind, ref.Name) } if deployment.Spec.Paused { return errors.Errorf("can't restart paused MachineDeployment (run rollout resume first): %v/%v", ref.Kind, ref.Name) } - if err := setRestartedAtAnnotation(proxy, ref.Name, ref.Namespace); err != nil { + if deployment.Spec.RolloutAfter != nil && deployment.Spec.RolloutAfter.After(time.Now()) { + return errors.Errorf("can't update MachineDeployment (remove 'spec.rolloutAfter' first): %v/%v", ref.Kind, ref.Name) + } + if err := setRolloutAfterOnMachineDeployment(ctx, proxy, ref.Name, ref.Namespace); err != nil { return err } case KubeadmControlPlane: - kcp, err := getKubeadmControlPlane(proxy, ref.Name, ref.Namespace) + kcp, err := getKubeadmControlPlane(ctx, proxy, ref.Name, ref.Namespace) if err != nil || kcp == nil { return errors.Wrapf(err, "failed to fetch %v/%v", ref.Kind, ref.Name) } @@ -51,7 +55,7 @@ func (r *rollout) ObjectRestarter(proxy cluster.Proxy, ref corev1.ObjectReferenc if kcp.Spec.RolloutAfter != nil && kcp.Spec.RolloutAfter.After(time.Now()) { return errors.Errorf("can't update KubeadmControlPlane (remove 'spec.rolloutAfter' first): %v/%v", ref.Kind, ref.Name) } - if err := setRolloutAfter(proxy, ref.Name, ref.Namespace); err != nil { + if err := setRolloutAfterOnKCP(ctx, proxy, ref.Name, ref.Namespace); err != nil { return err } default: diff --git a/cmd/clusterctl/client/alpha/rollout_restarter_test.go b/cmd/clusterctl/client/alpha/rollout_restarter_test.go index f968ccf89187..14969e1946f2 100644 --- a/cmd/clusterctl/client/alpha/rollout_restarter_test.go +++ b/cmd/clusterctl/client/alpha/rollout_restarter_test.go @@ -43,7 +43,7 @@ func Test_ObjectRestarter(t *testing.T) { wantRollout bool }{ { - name: "machinedeployment should have restart annotation", + name: "machinedeployment should have rolloutAfter", fields: fields{ objs: []client.Object{ &clusterv1.MachineDeployment{ @@ -67,7 +67,7 @@ func Test_ObjectRestarter(t *testing.T) { wantRollout: true, }, { - name: "paused machinedeployment should not have restart annotation", + name: "paused machinedeployment should not have rolloutAfter", fields: fields{ objs: []client.Object{ 
&clusterv1.MachineDeployment{ @@ -93,6 +93,33 @@ func Test_ObjectRestarter(t *testing.T) { wantErr: true, wantRollout: false, }, + { + name: "machinedeployment with spec.rolloutAfter should not be updatable", + fields: fields{ + objs: []client.Object{ + &clusterv1.MachineDeployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "MachineDeployment", + APIVersion: "cluster.x-k8s.io/v1beta1", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "md-1", + }, + Spec: clusterv1.MachineDeploymentSpec{ + RolloutAfter: &metav1.Time{Time: time.Now().Local().Add(time.Hour)}, + }, + }, + }, + ref: corev1.ObjectReference{ + Kind: MachineDeployment, + Name: "md-1", + Namespace: "default", + }, + }, + wantErr: true, + wantRollout: false, + }, { name: "kubeadmcontrolplane should have rolloutAfter", fields: fields{ @@ -177,14 +204,14 @@ func Test_ObjectRestarter(t *testing.T) { g := NewWithT(t) r := newRolloutClient() proxy := test.NewFakeProxy().WithObjs(tt.fields.objs...) - err := r.ObjectRestarter(proxy, tt.fields.ref) + err := r.ObjectRestarter(context.Background(), proxy, tt.fields.ref) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } g.Expect(err).ToNot(HaveOccurred()) for _, obj := range tt.fields.objs { - cl, err := proxy.NewClient() + cl, err := proxy.NewClient(context.Background()) g.Expect(err).ToNot(HaveOccurred()) key := client.ObjectKeyFromObject(obj) switch obj.(type) { @@ -193,9 +220,9 @@ func Test_ObjectRestarter(t *testing.T) { err = cl.Get(context.TODO(), key, md) g.Expect(err).ToNot(HaveOccurred()) if tt.wantRollout { - g.Expect(md.Spec.Template.Annotations).To(HaveKey("cluster.x-k8s.io/restartedAt")) + g.Expect(md.Spec.RolloutAfter).NotTo(BeNil()) } else { - g.Expect(md.Spec.Template.Annotations).ToNot(HaveKey("cluster.x-k8s.io/restartedAt")) + g.Expect(md.Spec.RolloutAfter).To(BeNil()) } case *controlplanev1.KubeadmControlPlane: kcp := &controlplanev1.KubeadmControlPlane{} @@ -204,7 +231,7 @@ func Test_ObjectRestarter(t *testing.T) { if tt.wantRollout { g.Expect(kcp.Spec.RolloutAfter).NotTo(BeNil()) } else { - g.Expect(kcp.Spec.RolloutAfter).To(nil) + g.Expect(kcp.Spec.RolloutAfter).To(BeNil()) } } } diff --git a/cmd/clusterctl/client/alpha/rollout_resumer.go b/cmd/clusterctl/client/alpha/rollout_resumer.go index 7be784b8dfd3..b224e91e87a0 100644 --- a/cmd/clusterctl/client/alpha/rollout_resumer.go +++ b/cmd/clusterctl/client/alpha/rollout_resumer.go @@ -17,6 +17,7 @@ limitations under the License. package alpha import ( + "context" "fmt" "strings" @@ -31,28 +32,28 @@ import ( ) // ObjectResumer will issue a resume on the specified cluster-api resource. -func (r *rollout) ObjectResumer(proxy cluster.Proxy, ref corev1.ObjectReference) error { +func (r *rollout) ObjectResumer(ctx context.Context, proxy cluster.Proxy, ref corev1.ObjectReference) error { switch ref.Kind { case MachineDeployment: - deployment, err := getMachineDeployment(proxy, ref.Name, ref.Namespace) + deployment, err := getMachineDeployment(ctx, proxy, ref.Name, ref.Namespace) if err != nil || deployment == nil { return errors.Wrapf(err, "failed to fetch %v/%v", ref.Kind, ref.Name) } if !deployment.Spec.Paused { return errors.Errorf("MachineDeployment is not currently paused: %v/%v\n", ref.Kind, ref.Name) //nolint:revive // MachineDeployment is intentionally capitalized. 
 		}
-		if err := resumeMachineDeployment(proxy, ref.Name, ref.Namespace); err != nil {
+		if err := resumeMachineDeployment(ctx, proxy, ref.Name, ref.Namespace); err != nil {
 			return err
 		}
 	case KubeadmControlPlane:
-		kcp, err := getKubeadmControlPlane(proxy, ref.Name, ref.Namespace)
+		kcp, err := getKubeadmControlPlane(ctx, proxy, ref.Name, ref.Namespace)
 		if err != nil || kcp == nil {
 			return errors.Wrapf(err, "failed to fetch %v/%v", ref.Kind, ref.Name)
 		}
 		if !annotations.HasPaused(kcp.GetObjectMeta()) {
 			return errors.Errorf("KubeadmControlPlane is not currently paused: %v/%v\n", ref.Kind, ref.Name) //nolint:revive // KubeadmControlPlane is intentionally capitalized.
 		}
-		if err := resumeKubeadmControlPlane(proxy, ref.Name, ref.Namespace); err != nil {
+		if err := resumeKubeadmControlPlane(ctx, proxy, ref.Name, ref.Namespace); err != nil {
 			return err
 		}
 	default:
@@ -62,17 +63,17 @@ func (r *rollout) ObjectResumer(proxy cluster.Proxy, ref corev1.ObjectReference)
 }
 
 // resumeMachineDeployment sets Paused to false in the MachineDeployment's spec.
-func resumeMachineDeployment(proxy cluster.Proxy, name, namespace string) error {
+func resumeMachineDeployment(ctx context.Context, proxy cluster.Proxy, name, namespace string) error {
 	patch := client.RawPatch(types.MergePatchType, []byte(fmt.Sprintf("{\"spec\":{\"paused\":%t}}", false)))
 
-	return patchMachineDeployment(proxy, name, namespace, patch)
+	return patchMachineDeployment(ctx, proxy, name, namespace, patch)
 }
 
 // resumeKubeadmControlPlane removes the paused annotation.
-func resumeKubeadmControlPlane(proxy cluster.Proxy, name, namespace string) error {
+func resumeKubeadmControlPlane(ctx context.Context, proxy cluster.Proxy, name, namespace string) error {
 	// In the paused annotation we must replace slashes with ~1, see https://datatracker.ietf.org/doc/html/rfc6901#section-3.
 	pausedAnnotation := strings.Replace(clusterv1.PausedAnnotation, "/", "~1", -1)
 	patch := client.RawPatch(types.JSONPatchType, []byte(fmt.Sprintf("[{\"op\": \"remove\", \"path\": \"/metadata/annotations/%s\"}]", pausedAnnotation)))
 
-	return patchKubeadmControlPlane(proxy, name, namespace, patch)
+	return patchKubeadmControlPlane(ctx, proxy, name, namespace, patch)
 }
diff --git a/cmd/clusterctl/client/alpha/rollout_resumer_test.go b/cmd/clusterctl/client/alpha/rollout_resumer_test.go
index 92ec8da5f757..da85fb930291 100644
--- a/cmd/clusterctl/client/alpha/rollout_resumer_test.go
+++ b/cmd/clusterctl/client/alpha/rollout_resumer_test.go
@@ -149,14 +149,14 @@ func Test_ObjectResumer(t *testing.T) {
 			g := NewWithT(t)
 			r := newRolloutClient()
 			proxy := test.NewFakeProxy().WithObjs(tt.fields.objs...)
-			err := r.ObjectResumer(proxy, tt.fields.ref)
+			err := r.ObjectResumer(context.Background(), proxy, tt.fields.ref)
 			if tt.wantErr {
 				g.Expect(err).To(HaveOccurred())
 				return
 			}
 			g.Expect(err).ToNot(HaveOccurred())
 			for _, obj := range tt.fields.objs {
-				cl, err := proxy.NewClient()
+				cl, err := proxy.NewClient(context.Background())
 				g.Expect(err).ToNot(HaveOccurred())
 				key := client.ObjectKeyFromObject(obj)
 				switch obj.(type) {
diff --git a/cmd/clusterctl/client/alpha/rollout_rollbacker.go b/cmd/clusterctl/client/alpha/rollout_rollbacker.go
index d2c7e69dfa7f..046a8df4a118 100644
--- a/cmd/clusterctl/client/alpha/rollout_rollbacker.go
+++ b/cmd/clusterctl/client/alpha/rollout_rollbacker.go
@@ -17,6 +17,8 @@ limitations under the License.
package alpha import ( + "context" + "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" @@ -27,17 +29,17 @@ import ( ) // ObjectRollbacker will issue a rollback on the specified cluster-api resource. -func (r *rollout) ObjectRollbacker(proxy cluster.Proxy, ref corev1.ObjectReference, toRevision int64) error { +func (r *rollout) ObjectRollbacker(ctx context.Context, proxy cluster.Proxy, ref corev1.ObjectReference, toRevision int64) error { switch ref.Kind { case MachineDeployment: - deployment, err := getMachineDeployment(proxy, ref.Name, ref.Namespace) + deployment, err := getMachineDeployment(ctx, proxy, ref.Name, ref.Namespace) if err != nil || deployment == nil { return errors.Wrapf(err, "failed to get %v/%v", ref.Kind, ref.Name) } if deployment.Spec.Paused { return errors.Errorf("can't rollback a paused MachineDeployment: please run 'clusterctl rollout resume %v/%v' first", ref.Kind, ref.Name) } - if err := rollbackMachineDeployment(proxy, deployment, toRevision); err != nil { + if err := rollbackMachineDeployment(ctx, proxy, deployment, toRevision); err != nil { return err } default: @@ -47,9 +49,9 @@ func (r *rollout) ObjectRollbacker(proxy cluster.Proxy, ref corev1.ObjectReferen } // rollbackMachineDeployment will rollback to a previous MachineSet revision used by this MachineDeployment. -func rollbackMachineDeployment(proxy cluster.Proxy, d *clusterv1.MachineDeployment, toRevision int64) error { +func rollbackMachineDeployment(ctx context.Context, proxy cluster.Proxy, md *clusterv1.MachineDeployment, toRevision int64) error { log := logf.Log - c, err := proxy.NewClient() + c, err := proxy.NewClient(ctx) if err != nil { return err } @@ -57,7 +59,7 @@ func rollbackMachineDeployment(proxy cluster.Proxy, d *clusterv1.MachineDeployme if toRevision < 0 { return errors.Errorf("revision number cannot be negative: %v", toRevision) } - msList, err := getMachineSetsForDeployment(proxy, d) + msList, err := getMachineSetsForDeployment(ctx, proxy, md) if err != nil { return err } @@ -67,7 +69,7 @@ func rollbackMachineDeployment(proxy cluster.Proxy, d *clusterv1.MachineDeployme return err } log.V(7).Info("Found revision", "revision", msForRevision) - patchHelper, err := patch.NewHelper(d, c) + patchHelper, err := patch.NewHelper(md, c) if err != nil { return err } @@ -75,6 +77,6 @@ func rollbackMachineDeployment(proxy cluster.Proxy, d *clusterv1.MachineDeployme revMSTemplate := *msForRevision.Spec.Template.DeepCopy() delete(revMSTemplate.Labels, clusterv1.MachineDeploymentUniqueLabel) - d.Spec.Template = revMSTemplate - return patchHelper.Patch(ctx, d) + md.Spec.Template = revMSTemplate + return patchHelper.Patch(ctx, md) } diff --git a/cmd/clusterctl/client/alpha/rollout_rollbacker_test.go b/cmd/clusterctl/client/alpha/rollout_rollbacker_test.go index 03e3fbc4dd66..c8024600e06c 100644 --- a/cmd/clusterctl/client/alpha/rollout_rollbacker_test.go +++ b/cmd/clusterctl/client/alpha/rollout_rollbacker_test.go @@ -23,7 +23,7 @@ import ( . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -71,7 +71,7 @@ func Test_ObjectRollbacker(t *testing.T) { Name: "md-template", }, Bootstrap: clusterv1.Bootstrap{ - DataSecretName: pointer.String("data-secret-name"), + DataSecretName: ptr.To("data-secret-name"), }, }, }, @@ -150,7 +150,7 @@ func Test_ObjectRollbacker(t *testing.T) { Name: "md-template-rollback", }, Bootstrap: clusterv1.Bootstrap{ - DataSecretName: pointer.String("data-secret-name-rollback"), + DataSecretName: ptr.To("data-secret-name-rollback"), }, }, }, @@ -241,13 +241,13 @@ func Test_ObjectRollbacker(t *testing.T) { g := NewWithT(t) r := newRolloutClient() proxy := test.NewFakeProxy().WithObjs(tt.fields.objs...) - err := r.ObjectRollbacker(proxy, tt.fields.ref, tt.fields.toRevision) + err := r.ObjectRollbacker(context.Background(), proxy, tt.fields.ref, tt.fields.toRevision) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } g.Expect(err).ToNot(HaveOccurred()) - cl, err := proxy.NewClient() + cl, err := proxy.NewClient(context.Background()) g.Expect(err).ToNot(HaveOccurred()) key := client.ObjectKeyFromObject(deployment) md := &clusterv1.MachineDeployment{} diff --git a/cmd/clusterctl/client/client.go b/cmd/clusterctl/client/client.go index 05d229503220..78f050b7d41b 100644 --- a/cmd/clusterctl/client/client.go +++ b/cmd/clusterctl/client/client.go @@ -17,6 +17,8 @@ limitations under the License. package client import ( + "context" + clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/alpha" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster" @@ -31,46 +33,44 @@ type Client interface { GetProvidersConfig() ([]Provider, error) // GetProviderComponents returns the provider components for a given provider with options including targetNamespace. - GetProviderComponents(provider string, providerType clusterctlv1.ProviderType, options ComponentsOptions) (Components, error) + GetProviderComponents(ctx context.Context, provider string, providerType clusterctlv1.ProviderType, options ComponentsOptions) (Components, error) // GenerateProvider returns the provider components for a given provider with options including targetNamespace. - GenerateProvider(provider string, providerType clusterctlv1.ProviderType, options ComponentsOptions) (Components, error) + GenerateProvider(ctx context.Context, provider string, providerType clusterctlv1.ProviderType, options ComponentsOptions) (Components, error) // Init initializes a management cluster by adding the requested list of providers. - Init(options InitOptions) ([]Components, error) + Init(ctx context.Context, options InitOptions) ([]Components, error) // InitImages returns the list of images required for executing the init command. - InitImages(options InitOptions) ([]string, error) + InitImages(ctx context.Context, options InitOptions) ([]string, error) // GetClusterTemplate returns a workload cluster template. - GetClusterTemplate(options GetClusterTemplateOptions) (Template, error) + GetClusterTemplate(ctx context.Context, options GetClusterTemplateOptions) (Template, error) // GetKubeconfig returns the kubeconfig of the workload cluster. 
-	GetKubeconfig(options GetKubeconfigOptions) (string, error)
+	GetKubeconfig(ctx context.Context, options GetKubeconfigOptions) (string, error)
 
 	// Delete deletes providers from a management cluster.
-	Delete(options DeleteOptions) error
+	Delete(ctx context.Context, options DeleteOptions) error
 
 	// Move moves all the Cluster API objects existing in a namespace (or from all the namespaces if empty) to a target management cluster.
-	Move(options MoveOptions) error
+	Move(ctx context.Context, options MoveOptions) error
 
-	// PlanUpgrade returns a set of suggested Upgrade plans for the cluster, and more specifically:
-	//   - Upgrade to the latest version in the v1alpha3 series: ....
-	//   - Upgrade to the latest version in the v1alpha4 series: ....
-	PlanUpgrade(options PlanUpgradeOptions) ([]UpgradePlan, error)
+	// PlanUpgrade returns a set of suggested Upgrade plans for the cluster.
+	PlanUpgrade(ctx context.Context, options PlanUpgradeOptions) ([]UpgradePlan, error)
 
 	// PlanCertManagerUpgrade returns a CertManagerUpgradePlan.
-	PlanCertManagerUpgrade(options PlanUpgradeOptions) (CertManagerUpgradePlan, error)
+	PlanCertManagerUpgrade(ctx context.Context, options PlanUpgradeOptions) (CertManagerUpgradePlan, error)
 
 	// ApplyUpgrade executes an upgrade plan.
-	ApplyUpgrade(options ApplyUpgradeOptions) error
+	ApplyUpgrade(ctx context.Context, options ApplyUpgradeOptions) error
 
 	// ProcessYAML provides a direct way to process a yaml and inspect its
 	// variables.
-	ProcessYAML(options ProcessYAMLOptions) (YamlPrinter, error)
+	ProcessYAML(ctx context.Context, options ProcessYAMLOptions) (YamlPrinter, error)
 
 	// DescribeCluster returns the object tree representing the status of a Cluster API cluster.
-	DescribeCluster(options DescribeClusterOptions) (*tree.ObjectTree, error)
+	DescribeCluster(ctx context.Context, options DescribeClusterOptions) (*tree.ObjectTree, error)
 
 	// AlphaClient is an interface for alpha features in clusterctl
 	AlphaClient
@@ -79,15 +79,17 @@ type Client interface {
 
 // AlphaClient exposes the alpha features in clusterctl high-level client library.
 type AlphaClient interface {
 	// RolloutRestart provides rollout restart of cluster-api resources
-	RolloutRestart(options RolloutRestartOptions) error
+	RolloutRestart(ctx context.Context, options RolloutRestartOptions) error
 	// RolloutPause provides rollout pause of cluster-api resources
-	RolloutPause(options RolloutPauseOptions) error
+	RolloutPause(ctx context.Context, options RolloutPauseOptions) error
 	// RolloutResume provides rollout resume of paused cluster-api resources
-	RolloutResume(options RolloutResumeOptions) error
+	RolloutResume(ctx context.Context, options RolloutResumeOptions) error
 	// RolloutUndo provides rollout rollback of cluster-api resources
-	RolloutUndo(options RolloutUndoOptions) error
+	RolloutUndo(ctx context.Context, options RolloutUndoOptions) error
 	// TopologyPlan dry runs the topology reconciler
-	TopologyPlan(options TopologyPlanOptions) (*TopologyPlanOutput, error)
+	//
+	// Deprecated: TopologyPlan is deprecated and will be removed in one of the upcoming releases.
+	TopologyPlan(ctx context.Context, options TopologyPlanOptions) (*TopologyPlanOutput, error)
 }
 
 // YamlPrinter exposes methods that print the processed template and
@@ -115,7 +117,7 @@ type RepositoryClientFactoryInput struct {
 }
 
 // RepositoryClientFactory is a factory of repository.Client from a given input.
-type RepositoryClientFactory func(RepositoryClientFactoryInput) (repository.Client, error)
+type RepositoryClientFactory func(context.Context, RepositoryClientFactoryInput) (repository.Client, error)
 
 // ClusterClientFactoryInput represents the inputs required by the factory.
 type ClusterClientFactoryInput struct {
@@ -156,11 +158,11 @@ func InjectClusterClientFactory(factory ClusterClientFactory) Option {
 }
 
 // New returns a clusterctl client.
-func New(path string, options ...Option) (Client, error) {
-	return newClusterctlClient(path, options...)
+func New(ctx context.Context, path string, options ...Option) (Client, error) {
+	return newClusterctlClient(ctx, path, options...)
 }
 
-func newClusterctlClient(path string, options ...Option) (*clusterctlClient, error) {
+func newClusterctlClient(ctx context.Context, path string, options ...Option) (*clusterctlClient, error) {
 	client := &clusterctlClient{}
 	for _, o := range options {
 		o(client)
@@ -169,7 +171,7 @@ func newClusterctlClient(path string, options ...Option) (*clusterctlClient, err
 	// if there is an injected config, use it, otherwise use the default one
 	// provided by the config low level library.
 	if client.configClient == nil {
-		c, err := config.New(path)
+		c, err := config.New(ctx, path)
 		if err != nil {
 			return nil, err
 		}
@@ -197,8 +199,9 @@ func newClusterctlClient(path string, options ...Option) (*clusterctlClient, err
 
 // defaultRepositoryFactory is a RepositoryClientFactory func that uses the default client provided by the repository low level library.
 func defaultRepositoryFactory(configClient config.Client) RepositoryClientFactory {
-	return func(input RepositoryClientFactoryInput) (repository.Client, error) {
+	return func(ctx context.Context, input RepositoryClientFactoryInput) (repository.Client, error) {
 		return repository.New(
+			ctx,
 			input.Provider,
 			configClient,
 			repository.InjectYamlProcessor(input.Processor),
diff --git a/cmd/clusterctl/client/client_test.go b/cmd/clusterctl/client/client_test.go
index b1374b805255..36b3d61fd1fe 100644
--- a/cmd/clusterctl/client/client_test.go
+++ b/cmd/clusterctl/client/client_test.go
@@ -17,6 +17,7 @@ limitations under the License.
 package client
 
 import (
+	"context"
 	"fmt"
 	"testing"
 	"time"
@@ -42,12 +43,14 @@ func TestNewFakeClient(_ *testing.T) {
 	// create a fake config with a provider named P1 and a variable named var
 	repository1Config := config.NewProvider("p1", "url", clusterctlv1.CoreProviderType)
 
-	config1 := newFakeConfig().
+	ctx := context.Background()
+
+	config1 := newFakeConfig(ctx).
 		WithVar("var", "value").
 		WithProvider(repository1Config)
 
 	// create a fake repository with some YAML files in it (usually matching the list of providers defined in the config)
-	repository1 := newFakeRepository(repository1Config, config1).
+	repository1 := newFakeRepository(ctx, repository1Config, config1).
 		WithPaths("root", "components").
 		WithDefaultVersion("v1.0").
 		WithFile("v1.0", "components.yaml", []byte("content"))
@@ -57,7 +60,7 @@ func TestNewFakeClient(_ *testing.T) {
 		WithObjs()
 
 	// create a new fakeClient that allows executing tests on the fake config, the fake repositories and the fake cluster.
-	newFakeClient(config1).
+	newFakeClient(context.Background(), config1).
 		WithRepository(repository1).
WithCluster(cluster1) } @@ -76,81 +79,81 @@ func (f fakeClient) GetProvidersConfig() ([]Provider, error) { return f.internalClient.GetProvidersConfig() } -func (f fakeClient) GetProviderComponents(provider string, providerType clusterctlv1.ProviderType, options ComponentsOptions) (Components, error) { - return f.internalClient.GetProviderComponents(provider, providerType, options) +func (f fakeClient) GetProviderComponents(ctx context.Context, provider string, providerType clusterctlv1.ProviderType, options ComponentsOptions) (Components, error) { + return f.internalClient.GetProviderComponents(ctx, provider, providerType, options) } -func (f fakeClient) GenerateProvider(provider string, providerType clusterctlv1.ProviderType, options ComponentsOptions) (Components, error) { - return f.internalClient.GenerateProvider(provider, providerType, options) +func (f fakeClient) GenerateProvider(ctx context.Context, provider string, providerType clusterctlv1.ProviderType, options ComponentsOptions) (Components, error) { + return f.internalClient.GenerateProvider(ctx, provider, providerType, options) } -func (f fakeClient) GetClusterTemplate(options GetClusterTemplateOptions) (Template, error) { - return f.internalClient.GetClusterTemplate(options) +func (f fakeClient) GetClusterTemplate(ctx context.Context, options GetClusterTemplateOptions) (Template, error) { + return f.internalClient.GetClusterTemplate(ctx, options) } -func (f fakeClient) GetKubeconfig(options GetKubeconfigOptions) (string, error) { - return f.internalClient.GetKubeconfig(options) +func (f fakeClient) GetKubeconfig(ctx context.Context, options GetKubeconfigOptions) (string, error) { + return f.internalClient.GetKubeconfig(ctx, options) } -func (f fakeClient) Init(options InitOptions) ([]Components, error) { - return f.internalClient.Init(options) +func (f fakeClient) Init(ctx context.Context, options InitOptions) ([]Components, error) { + return f.internalClient.Init(ctx, options) } -func (f fakeClient) InitImages(options InitOptions) ([]string, error) { - return f.internalClient.InitImages(options) +func (f fakeClient) InitImages(ctx context.Context, options InitOptions) ([]string, error) { + return f.internalClient.InitImages(ctx, options) } -func (f fakeClient) Delete(options DeleteOptions) error { - return f.internalClient.Delete(options) +func (f fakeClient) Delete(ctx context.Context, options DeleteOptions) error { + return f.internalClient.Delete(ctx, options) } -func (f fakeClient) Move(options MoveOptions) error { - return f.internalClient.Move(options) +func (f fakeClient) Move(ctx context.Context, options MoveOptions) error { + return f.internalClient.Move(ctx, options) } -func (f fakeClient) PlanUpgrade(options PlanUpgradeOptions) ([]UpgradePlan, error) { - return f.internalClient.PlanUpgrade(options) +func (f fakeClient) PlanUpgrade(ctx context.Context, options PlanUpgradeOptions) ([]UpgradePlan, error) { + return f.internalClient.PlanUpgrade(ctx, options) } -func (f fakeClient) PlanCertManagerUpgrade(options PlanUpgradeOptions) (CertManagerUpgradePlan, error) { - return f.internalClient.PlanCertManagerUpgrade(options) +func (f fakeClient) PlanCertManagerUpgrade(ctx context.Context, options PlanUpgradeOptions) (CertManagerUpgradePlan, error) { + return f.internalClient.PlanCertManagerUpgrade(ctx, options) } -func (f fakeClient) ApplyUpgrade(options ApplyUpgradeOptions) error { - return f.internalClient.ApplyUpgrade(options) +func (f fakeClient) ApplyUpgrade(ctx context.Context, options ApplyUpgradeOptions) error { + 
return f.internalClient.ApplyUpgrade(ctx, options) } -func (f fakeClient) ProcessYAML(options ProcessYAMLOptions) (YamlPrinter, error) { - return f.internalClient.ProcessYAML(options) +func (f fakeClient) ProcessYAML(ctx context.Context, options ProcessYAMLOptions) (YamlPrinter, error) { + return f.internalClient.ProcessYAML(ctx, options) } -func (f fakeClient) RolloutRestart(options RolloutRestartOptions) error { - return f.internalClient.RolloutRestart(options) +func (f fakeClient) RolloutRestart(ctx context.Context, options RolloutRestartOptions) error { + return f.internalClient.RolloutRestart(ctx, options) } -func (f fakeClient) DescribeCluster(options DescribeClusterOptions) (*tree.ObjectTree, error) { - return f.internalClient.DescribeCluster(options) +func (f fakeClient) DescribeCluster(ctx context.Context, options DescribeClusterOptions) (*tree.ObjectTree, error) { + return f.internalClient.DescribeCluster(ctx, options) } -func (f fakeClient) RolloutPause(options RolloutPauseOptions) error { - return f.internalClient.RolloutPause(options) +func (f fakeClient) RolloutPause(ctx context.Context, options RolloutPauseOptions) error { + return f.internalClient.RolloutPause(ctx, options) } -func (f fakeClient) RolloutResume(options RolloutResumeOptions) error { - return f.internalClient.RolloutResume(options) +func (f fakeClient) RolloutResume(ctx context.Context, options RolloutResumeOptions) error { + return f.internalClient.RolloutResume(ctx, options) } -func (f fakeClient) RolloutUndo(options RolloutUndoOptions) error { - return f.internalClient.RolloutUndo(options) +func (f fakeClient) RolloutUndo(ctx context.Context, options RolloutUndoOptions) error { + return f.internalClient.RolloutUndo(ctx, options) } -func (f fakeClient) TopologyPlan(options TopologyPlanOptions) (*cluster.TopologyPlanOutput, error) { - return f.internalClient.TopologyPlan(options) +func (f fakeClient) TopologyPlan(ctx context.Context, options TopologyPlanOptions) (*cluster.TopologyPlanOutput, error) { + return f.internalClient.TopologyPlan(ctx, options) } // newFakeClient returns a clusterctl client that allows to execute tests on a set of fake config, fake repositories and fake clusters. // you can use WithCluster and WithRepository to prepare for the test case. 
-func newFakeClient(configClient config.Client) *fakeClient { +func newFakeClient(ctx context.Context, configClient config.Client) *fakeClient { fake := &fakeClient{ clusters: map[cluster.Kubeconfig]cluster.Client{}, repositories: map[string]repository.Client{}, @@ -158,7 +161,7 @@ func newFakeClient(configClient config.Client) *fakeClient { fake.configClient = configClient if fake.configClient == nil { - fake.configClient = newFakeConfig() + fake.configClient = newFakeConfig(ctx) } var clusterClientFactory = func(i ClusterClientFactoryInput) (cluster.Client, error) { @@ -170,10 +173,10 @@ func newFakeClient(configClient config.Client) *fakeClient { return fake.clusters[k], nil } - fake.internalClient, _ = newClusterctlClient("fake-config", + fake.internalClient, _ = newClusterctlClient(ctx, "fake-config", InjectConfig(fake.configClient), InjectClusterClientFactory(clusterClientFactory), - InjectRepositoryFactory(func(input RepositoryClientFactoryInput) (repository.Client, error) { + InjectRepositoryFactory(func(_ context.Context, input RepositoryClientFactoryInput) (repository.Client, error) { if _, ok := fake.repositories[input.Provider.ManifestLabel()]; !ok { return nil, errors.Errorf("repository for kubeconfig %q does not exist", input.Provider.ManifestLabel()) } @@ -209,14 +212,14 @@ func newFakeCluster(kubeconfig cluster.Kubeconfig, configClient config.Client) * } fake.fakeProxy = test.NewFakeProxy() - pollImmediateWaiter := func(interval, timeout time.Duration, condition wait.ConditionFunc) error { + pollImmediateWaiter := func(context.Context, time.Duration, time.Duration, wait.ConditionWithContextFunc) error { return nil } fake.internalclient = cluster.New(kubeconfig, configClient, cluster.InjectProxy(fake.fakeProxy), cluster.InjectPollImmediateWaiter(pollImmediateWaiter), - cluster.InjectRepositoryFactory(func(provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) { + cluster.InjectRepositoryFactory(func(_ context.Context, provider config.Provider, _ config.Client, _ ...repository.Option) (repository.Client, error) { if _, ok := fake.repositories[provider.Name()]; !ok { return nil, errors.Errorf("repository for kubeconfig %q does not exist", provider.Name()) } @@ -243,19 +246,19 @@ type fakeCertManagerClient struct { var _ cluster.CertManagerClient = &fakeCertManagerClient{} -func (p *fakeCertManagerClient) EnsureInstalled() error { +func (p *fakeCertManagerClient) EnsureInstalled(_ context.Context) error { return nil } -func (p *fakeCertManagerClient) EnsureLatestVersion() error { +func (p *fakeCertManagerClient) EnsureLatestVersion(_ context.Context) error { return nil } -func (p *fakeCertManagerClient) PlanUpgrade() (cluster.CertManagerUpgradePlan, error) { +func (p *fakeCertManagerClient) PlanUpgrade(_ context.Context) (cluster.CertManagerUpgradePlan, error) { return p.certManagerPlan, nil } -func (p *fakeCertManagerClient) Images() ([]string, error) { +func (p *fakeCertManagerClient) Images(_ context.Context) ([]string, error) { return p.images, p.imagesError } @@ -350,10 +353,10 @@ func (f *fakeClusterClient) WithCertManagerClient(client cluster.CertManagerClie // newFakeConfig return a fake implementation of the client for low-level config library. // The implementation uses a FakeReader that stores configuration settings in a map; you can use // the WithVar or WithProvider methods to set the map values. 
-func newFakeConfig() *fakeConfigClient { +func newFakeConfig(ctx context.Context) *fakeConfigClient { fakeReader := test.NewFakeReader() - client, _ := config.New("fake-config", config.InjectReader(fakeReader)) + client, _ := config.New(ctx, "fake-config", config.InjectReader(fakeReader)) return &fakeConfigClient{ fakeReader: fakeReader, @@ -397,11 +400,11 @@ func (f *fakeConfigClient) WithProvider(provider config.Provider) *fakeConfigCli // newFakeRepository return a fake implementation of the client for low-level repository library. // The implementation stores configuration settings in a map; you can use // the WithPaths or WithDefaultVersion methods to configure the repository and WithFile to set the map values. -func newFakeRepository(provider config.Provider, configClient config.Client) *fakeRepositoryClient { +func newFakeRepository(ctx context.Context, provider config.Provider, configClient config.Client) *fakeRepositoryClient { fakeRepository := repository.NewMemoryRepository() if configClient == nil { - configClient = newFakeConfig() + configClient = newFakeConfig(ctx) } return &fakeRepositoryClient{ @@ -425,8 +428,8 @@ func (f fakeRepositoryClient) DefaultVersion() string { return f.fakeRepository.DefaultVersion() } -func (f fakeRepositoryClient) GetVersions() ([]string, error) { - return f.fakeRepository.GetVersions() +func (f fakeRepositoryClient) GetVersions(ctx context.Context) ([]string, error) { + return f.fakeRepository.GetVersions(ctx) } func (f fakeRepositoryClient) Components() repository.ComponentsClient { @@ -500,14 +503,14 @@ type fakeTemplateClient struct { processor yaml.Processor } -func (f *fakeTemplateClient) Get(flavor, targetNamespace string, skipTemplateProcess bool) (repository.Template, error) { +func (f *fakeTemplateClient) Get(ctx context.Context, flavor, targetNamespace string, skipTemplateProcess bool) (repository.Template, error) { name := "cluster-template" if flavor != "" { name = fmt.Sprintf("%s-%s", name, flavor) } name = fmt.Sprintf("%s.yaml", name) - content, err := f.fakeRepository.GetFile(f.version, name) + content, err := f.fakeRepository.GetFile(ctx, f.version, name) if err != nil { return nil, err } @@ -528,9 +531,9 @@ type fakeClusterClassClient struct { processor yaml.Processor } -func (f *fakeClusterClassClient) Get(class, targetNamespace string, skipTemplateProcess bool) (repository.Template, error) { +func (f *fakeClusterClassClient) Get(ctx context.Context, class, targetNamespace string, skipTemplateProcess bool) (repository.Template, error) { name := fmt.Sprintf("clusterclass-%s.yaml", class) - content, err := f.fakeRepository.GetFile(f.version, name) + content, err := f.fakeRepository.GetFile(ctx, f.version, name) if err != nil { return nil, err } @@ -549,8 +552,8 @@ type fakeMetadataClient struct { fakeRepository *repository.MemoryRepository } -func (f *fakeMetadataClient) Get() (*clusterctlv1.Metadata, error) { - content, err := f.fakeRepository.GetFile(f.version, "metadata.yaml") +func (f *fakeMetadataClient) Get(ctx context.Context) (*clusterctlv1.Metadata, error) { + content, err := f.fakeRepository.GetFile(ctx, f.version, "metadata.yaml") if err != nil { return nil, err } @@ -572,12 +575,12 @@ type fakeComponentClient struct { processor yaml.Processor } -func (f *fakeComponentClient) Raw(options repository.ComponentsOptions) ([]byte, error) { - return f.getRawBytes(&options) +func (f *fakeComponentClient) Raw(ctx context.Context, options repository.ComponentsOptions) ([]byte, error) { + return f.getRawBytes(ctx, &options) } 
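Note: the fakes in this test file are the context-aware test doubles used throughout the package. As a minimal sketch of how they compose after this change, following the pattern shown in TestNewFakeClient earlier in this diff (the provider name, version, and file contents are illustrative):

    ctx := context.Background()

    // A fake config holding one provider and one variable.
    providerConfig := config.NewProvider("p1", "url", clusterctlv1.CoreProviderType)
    cfg := newFakeConfig(ctx).
        WithVar("var", "value").
        WithProvider(providerConfig)

    // A fake repository serving a components manifest for that provider.
    repo := newFakeRepository(ctx, providerConfig, cfg).
        WithPaths("root", "components").
        WithDefaultVersion("v1.0").
        WithFile("v1.0", "components.yaml", []byte("content"))

    // A fake clusterctl client wired to both; every constructor now takes ctx.
    fake := newFakeClient(ctx, cfg).WithRepository(repo)
    _ = fake
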
-func (f *fakeComponentClient) Get(options repository.ComponentsOptions) (repository.Components, error) { - content, err := f.getRawBytes(&options) +func (f *fakeComponentClient) Get(ctx context.Context, options repository.ComponentsOptions) (repository.Components, error) { + content, err := f.getRawBytes(ctx, &options) if err != nil { return nil, err } @@ -593,11 +596,11 @@ func (f *fakeComponentClient) Get(options repository.ComponentsOptions) (reposit ) } -func (f *fakeComponentClient) getRawBytes(options *repository.ComponentsOptions) ([]byte, error) { +func (f *fakeComponentClient) getRawBytes(ctx context.Context, options *repository.ComponentsOptions) ([]byte, error) { if options.Version == "" { options.Version = f.fakeRepository.DefaultVersion() } path := f.fakeRepository.ComponentsPath() - return f.fakeRepository.GetFile(options.Version, path) + return f.fakeRepository.GetFile(ctx, options.Version, path) } diff --git a/cmd/clusterctl/client/cluster/assets/topology-test/existing-my-cluster.yaml b/cmd/clusterctl/client/cluster/assets/topology-test/existing-my-cluster.yaml index 656ba64e8be3..7864e8f4deec 100644 --- a/cmd/clusterctl/client/cluster/assets/topology-test/existing-my-cluster.yaml +++ b/cmd/clusterctl/client/cluster/assets/topology-test/existing-my-cluster.yaml @@ -31,6 +31,11 @@ spec: controlPlane: metadata: {} replicas: 1 + workers: + machinePools: + - class: "default-worker" + name: "mp-0" + replicas: 1 --- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: DockerCluster @@ -78,27 +83,16 @@ spec: certSANs: - localhost - 127.0.0.1 - controllerManager: - extraArgs: - enable-hostpath-provisioner: "true" dns: {} etcd: {} networking: {} scheduler: {} initConfiguration: localAPIEndpoint: {} - nodeRegistration: - criSocket: unix:///var/run/containerd/containerd.sock - kubeletExtraArgs: - cgroup-driver: cgroupfs - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. joinConfiguration: discovery: {} - nodeRegistration: - criSocket: unix:///var/run/containerd/containerd.sock - kubeletExtraArgs: - cgroup-driver: cgroupfs - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. machineTemplate: infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 diff --git a/cmd/clusterctl/client/cluster/assets/topology-test/existing-my-second-cluster.yaml b/cmd/clusterctl/client/cluster/assets/topology-test/existing-my-second-cluster.yaml index a09de8274357..1122bf5675cd 100644 --- a/cmd/clusterctl/client/cluster/assets/topology-test/existing-my-second-cluster.yaml +++ b/cmd/clusterctl/client/cluster/assets/topology-test/existing-my-second-cluster.yaml @@ -79,27 +79,16 @@ spec: certSANs: - localhost - 127.0.0.1 - controllerManager: - extraArgs: - enable-hostpath-provisioner: "true" dns: {} etcd: {} networking: {} scheduler: {} initConfiguration: localAPIEndpoint: {} - nodeRegistration: - criSocket: unix:///var/run/containerd/containerd.sock - kubeletExtraArgs: - cgroup-driver: cgroupfs - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
joinConfiguration: discovery: {} - nodeRegistration: - criSocket: unix:///var/run/containerd/containerd.sock - kubeletExtraArgs: - cgroup-driver: cgroupfs - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. machineTemplate: infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 diff --git a/cmd/clusterctl/client/cluster/assets/topology-test/mock-CRDs.yaml b/cmd/clusterctl/client/cluster/assets/topology-test/mock-CRDs.yaml index 20fdfc1aade8..bfa58b2797c2 100644 --- a/cmd/clusterctl/client/cluster/assets/topology-test/mock-CRDs.yaml +++ b/cmd/clusterctl/client/cluster/assets/topology-test/mock-CRDs.yaml @@ -32,8 +32,24 @@ metadata: --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition +metadata: + labels: + cluster.x-k8s.io/provider: control-plane-kubeadm + cluster.x-k8s.io/v1beta1: v1beta1 + name: kubeadmconfigtemplates.bootstrap.cluster.x-k8s.io +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + cluster.x-k8s.io/provider: infrastructure-docker + cluster.x-k8s.io/v1beta1: v1beta1 + name: dockermachinetemplates.infrastructure.cluster.x-k8s.io +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition metadata: labels: cluster.x-k8s.io/provider: infrastructure-docker cluster.x-k8s.io/v1beta1: v1beta1 - name: dockermachinetemplates.infrastructure.cluster.x-k8s.io \ No newline at end of file + name: dockermachinepooltemplates.infrastructure.cluster.x-k8s.io \ No newline at end of file diff --git a/cmd/clusterctl/client/cluster/assets/topology-test/modified-CP-dockermachinepooltemplate.yaml b/cmd/clusterctl/client/cluster/assets/topology-test/modified-CP-dockermachinepooltemplate.yaml new file mode 100644 index 000000000000..26d1f88532ca --- /dev/null +++ b/cmd/clusterctl/client/cluster/assets/topology-test/modified-CP-dockermachinepooltemplate.yaml @@ -0,0 +1,14 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachinePoolTemplate +metadata: + name: "docker-worker-machinepooltemplate" + namespace: default +spec: + template: + metadata: + labels: + docker-machinepool-template: test-template-worker + spec: + extraMounts: + - containerPath: "/var/run/docker.sock" + hostPath: "/var/run/docker.sock" diff --git a/cmd/clusterctl/client/cluster/assets/topology-test/my-cluster-class.yaml b/cmd/clusterctl/client/cluster/assets/topology-test/my-cluster-class.yaml index a01e1d2ef653..b8a40a026608 100644 --- a/cmd/clusterctl/client/cluster/assets/topology-test/my-cluster-class.yaml +++ b/cmd/clusterctl/client/cluster/assets/topology-test/my-cluster-class.yaml @@ -22,6 +22,22 @@ spec: kind: DockerClusterTemplate name: my-cluster namespace: default + workers: + machinePools: + - class: "default-worker" + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: docker-worker-bootstraptemplate + namespace: default + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachinePoolTemplate + name: docker-worker-machinepooltemplate + namespace: default --- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: DockerClusterTemplate @@ -50,26 +66,12 @@ spec: namespace: default kubeadmConfigSpec: clusterConfiguration: - controllerManager: - extraArgs: { enable-hostpath-provisioner: 'true' } apiServer: certSANs: [ localhost, 127.0.0.1 ] 
initConfiguration: - nodeRegistration: - criSocket: unix:///var/run/containerd/containerd.sock - kubeletExtraArgs: - # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd - # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 - cgroup-driver: cgroupfs - eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. joinConfiguration: - nodeRegistration: - criSocket: unix:///var/run/containerd/containerd.sock - kubeletExtraArgs: - # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd - # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 - cgroup-driver: cgroupfs - eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. version: v1.21.2 --- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 @@ -83,3 +85,26 @@ spec: extraMounts: - containerPath: "/var/run/docker.sock" hostPath: "/var/run/docker.sock" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachinePoolTemplate +metadata: + name: "docker-worker-machinepooltemplate" + namespace: default +spec: + template: + spec: + extraMounts: + - containerPath: "/var/run/docker.sock" + hostPath: "/var/run/docker.sock" +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: "docker-worker-bootstraptemplate" + namespace: default +spec: + template: + spec: + joinConfiguration: + nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
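Note: the machinePools entries added to these test assets exercise ClusterClass MachinePool support. A rough sketch of the equivalent Go shape, assuming the clusterv1 (v1beta1) topology types and an illustrative replica count:

    // Workers topology carrying a MachinePool, mirroring the YAML above.
    workers := &clusterv1.WorkersTopology{
        MachinePools: []clusterv1.MachinePoolTopology{{
            Class:    "default-worker",
            Name:     "mp-0",
            Replicas: ptr.To[int32](1),
        }},
    }
    _ = workers
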
diff --git a/cmd/clusterctl/client/cluster/assets/topology-test/new-clusterclass-and-cluster.yaml b/cmd/clusterctl/client/cluster/assets/topology-test/new-clusterclass-and-cluster.yaml index bf04ec8816aa..fdcf5f56bcd6 100644 --- a/cmd/clusterctl/client/cluster/assets/topology-test/new-clusterclass-and-cluster.yaml +++ b/cmd/clusterctl/client/cluster/assets/topology-test/new-clusterclass-and-cluster.yaml @@ -62,6 +62,31 @@ spec: apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: DockerMachineTemplate name: docker-worker-machinetemplate + machinePools: + - class: "default-worker" + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: docker-worker-bootstraptemplate + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachinePoolTemplate + name: docker-worker-machinepooltemplate + - class: "default-worker-2" + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: docker-worker-bootstraptemplate + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachinePoolTemplate + name: docker-worker-machinepooltemplate --- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: DockerClusterTemplate @@ -90,26 +115,12 @@ spec: namespace: default kubeadmConfigSpec: clusterConfiguration: - controllerManager: - extraArgs: { enable-hostpath-provisioner: 'true' } apiServer: certSANs: [ localhost, 127.0.0.1 ] initConfiguration: - nodeRegistration: - criSocket: unix:///var/run/containerd/containerd.sock - kubeletExtraArgs: - # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd - # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 - cgroup-driver: cgroupfs - eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. joinConfiguration: - nodeRegistration: - criSocket: unix:///var/run/containerd/containerd.sock - kubeletExtraArgs: - # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd - # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 - cgroup-driver: cgroupfs - eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
version: v1.21.2 --- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 @@ -137,6 +148,17 @@ spec: preLoadImages: - gcr.io/kakaraparthy-devel/kindest/kindnetd:0.5.4 --- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachinePoolTemplate +metadata: + name: "docker-worker-machinepooltemplate" + namespace: default +spec: + template: + spec: + preLoadImages: + - gcr.io/kakaraparthy-devel/kindest/kindnetd:0.5.4 +--- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 kind: KubeadmConfigTemplate metadata: @@ -146,12 +168,7 @@ spec: template: spec: joinConfiguration: - nodeRegistration: - kubeletExtraArgs: - # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd - # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 - cgroup-driver: cgroupfs - eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. --- apiVersion: cluster.x-k8s.io/v1beta1 @@ -182,3 +199,10 @@ spec: - class: "default-worker" name: "md-1" replicas: 1 + machinePools: + - class: "default-worker" + name: "mp-0" + replicas: 1 + - class: "default-worker" + name: "mp-1" + replicas: 1 diff --git a/cmd/clusterctl/client/cluster/cert_manager.go b/cmd/clusterctl/client/cluster/cert_manager.go index dc1d3a91984b..b387398891df 100644 --- a/cmd/clusterctl/client/cluster/cert_manager.go +++ b/cmd/clusterctl/client/cluster/cert_manager.go @@ -21,7 +21,7 @@ import ( _ "embed" "time" - "github.com/blang/semver" + "github.com/blang/semver/v4" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -67,18 +67,18 @@ type CertManagerUpgradePlan struct { type CertManagerClient interface { // EnsureInstalled makes sure cert-manager is running and its API is available. // This is required to install a new provider. - EnsureInstalled() error + EnsureInstalled(ctx context.Context) error // EnsureLatestVersion checks the cert-manager version currently installed, and if it is // older than the version currently suggested by clusterctl, upgrades it. - EnsureLatestVersion() error + EnsureLatestVersion(ctx context.Context) error // PlanUpgrade retruns a CertManagerUpgradePlan with information regarding // a cert-manager upgrade if necessary. - PlanUpgrade() (CertManagerUpgradePlan, error) + PlanUpgrade(ctx context.Context) (CertManagerUpgradePlan, error) // Images return the list of images required for installing the cert-manager. - Images() ([]string, error) + Images(ctx context.Context) ([]string, error) } // certManagerClient implements CertManagerClient . @@ -103,9 +103,9 @@ func newCertManagerClient(configClient config.Client, repositoryClientFactory Re } // Images return the list of images required for installing the cert-manager. -func (cm *certManagerClient) Images() ([]string, error) { +func (cm *certManagerClient) Images(ctx context.Context) ([]string, error) { // If cert manager already exists in the cluster, there is no need of additional images for cert-manager. 
- exists, err := cm.certManagerNamespaceExists() + exists, err := cm.certManagerNamespaceExists(ctx) if err != nil { return nil, err } @@ -119,7 +119,7 @@ func (cm *certManagerClient) Images() ([]string, error) { return nil, err } - objs, err := cm.getManifestObjs(config) + objs, err := cm.getManifestObjs(ctx, config) if err != nil { return nil, err } @@ -131,10 +131,10 @@ func (cm *certManagerClient) Images() ([]string, error) { return images, nil } -func (cm *certManagerClient) certManagerNamespaceExists() (bool, error) { +func (cm *certManagerClient) certManagerNamespaceExists(ctx context.Context) (bool, error) { ns := &corev1.Namespace{} key := client.ObjectKey{Name: certManagerNamespace} - c, err := cm.proxy.NewClient() + c, err := cm.proxy.NewClient(ctx) if err != nil { return false, err } @@ -150,7 +150,7 @@ func (cm *certManagerClient) certManagerNamespaceExists() (bool, error) { // EnsureInstalled makes sure cert-manager is running and its API is available. // This is required to install a new provider. -func (cm *certManagerClient) EnsureInstalled() error { +func (cm *certManagerClient) EnsureInstalled(ctx context.Context) error { log := logf.Log // Checking if a version of cert manager supporting cert-manager-test-resources.yaml is already installed and properly working. @@ -159,27 +159,26 @@ func (cm *certManagerClient) EnsureInstalled() error { return nil } - // Otherwise install cert manager. - // NOTE: this instance of cert-manager will have clusterctl specific annotations that will be used to - // manage the lifecycle of all the components. - return cm.install() -} - -func (cm *certManagerClient) install() error { - log := logf.Log - config, err := cm.configClient.CertManager().Get() if err != nil { return err } - log.Info("Installing cert-manager", "Version", config.Version()) - - // Gets the cert-manager components from the repository. - objs, err := cm.getManifestObjs(config) + objs, err := cm.getManifestObjs(ctx, config) if err != nil { return err } + // Otherwise install cert manager. + // NOTE: this instance of cert-manager will have clusterctl specific annotations that will be used to + // manage the lifecycle of all the components. + return cm.install(ctx, config.Version(), objs) +} + +func (cm *certManagerClient) install(ctx context.Context, version string, objs []unstructured.Unstructured) error { + log := logf.Log + + log.Info("Installing cert-manager", "version", version) + // Install all cert-manager manifests createCertManagerBackoff := newWriteBackoff() objs = utilresource.SortForCreate(objs) @@ -187,8 +186,8 @@ func (cm *certManagerClient) install() error { o := objs[i] // Create the Kubernetes object. // Nb. The operation is wrapped in a retry loop to make ensureCerts more resilient to unexpected conditions. - if err := retryWithExponentialBackoff(createCertManagerBackoff, func() error { - return cm.createObj(o) + if err := retryWithExponentialBackoff(ctx, createCertManagerBackoff, func(ctx context.Context) error { + return cm.createObj(ctx, o) }); err != nil { return err } @@ -198,12 +197,12 @@ func (cm *certManagerClient) install() error { return cm.waitForAPIReady(ctx, true) } -// PlanUpgrade retruns a CertManagerUpgradePlan with information regarding +// PlanUpgrade returns a CertManagerUpgradePlan with information regarding // a cert-manager upgrade if necessary. 
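Every CertManagerClient entry point now takes a context, so callers control cancellation and timeouts end to end. A minimal caller sketch — the helper itself is illustrative and not part of the diff, but the method signatures match the interface above:

```go
package example

import (
	"context"
	"time"

	"sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster"
)

// ensureCertManager drives the context-aware CertManagerClient methods
// under a single deadline.
func ensureCertManager(cm cluster.CertManagerClient) error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
	defer cancel()

	plan, err := cm.PlanUpgrade(ctx)
	if err != nil {
		return err
	}
	if plan.ExternallyManaged || !plan.ShouldUpgrade {
		// Nothing to upgrade; still make sure an instance is installed and ready.
		return cm.EnsureInstalled(ctx)
	}
	// Deletes the outdated components and re-installs; every retry and API
	// call along the way now honors ctx.
	return cm.EnsureLatestVersion(ctx)
}
```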
-func (cm *certManagerClient) PlanUpgrade() (CertManagerUpgradePlan, error) { +func (cm *certManagerClient) PlanUpgrade(ctx context.Context) (CertManagerUpgradePlan, error) { log := logf.Log - objs, err := cm.proxy.ListResources(map[string]string{clusterctlv1.ClusterctlCoreLabel: clusterctlv1.ClusterctlCoreLabelCertManagerValue}, certManagerNamespace) + objs, err := cm.proxy.ListResources(ctx, map[string]string{clusterctlv1.ClusterctlCoreLabel: clusterctlv1.ClusterctlCoreLabelCertManagerValue}, certManagerNamespace) if err != nil { return CertManagerUpgradePlan{}, errors.Wrap(err, "failed get cert manager components") } @@ -214,25 +213,35 @@ func (cm *certManagerClient) PlanUpgrade() (CertManagerUpgradePlan, error) { return CertManagerUpgradePlan{ExternallyManaged: true}, nil } - log.Info("Checking cert-manager version...") - currentVersion, targetVersion, shouldUpgrade, err := cm.shouldUpgrade(objs) + // Get the list of objects to install. + config, err := cm.configClient.CertManager().Get() + if err != nil { + return CertManagerUpgradePlan{}, err + } + installObjs, err := cm.getManifestObjs(ctx, config) + if err != nil { + return CertManagerUpgradePlan{}, err + } + + log.Info("Checking if cert-manager needs upgrade...") + currentVersion, shouldUpgrade, err := cm.shouldUpgrade(config.Version(), objs, installObjs) if err != nil { return CertManagerUpgradePlan{}, err } return CertManagerUpgradePlan{ From: currentVersion, - To: targetVersion, + To: config.Version(), ShouldUpgrade: shouldUpgrade, }, nil } // EnsureLatestVersion checks the cert-manager version currently installed, and if it is // older than the version currently suggested by clusterctl, upgrades it. -func (cm *certManagerClient) EnsureLatestVersion() error { +func (cm *certManagerClient) EnsureLatestVersion(ctx context.Context) error { log := logf.Log - objs, err := cm.proxy.ListResources(map[string]string{clusterctlv1.ClusterctlCoreLabel: clusterctlv1.ClusterctlCoreLabelCertManagerValue}, certManagerNamespace) + objs, err := cm.proxy.ListResources(ctx, map[string]string{clusterctlv1.ClusterctlCoreLabel: clusterctlv1.ClusterctlCoreLabelCertManagerValue}, certManagerNamespace) if err != nil { return errors.Wrap(err, "failed get cert manager components") } @@ -243,8 +252,18 @@ func (cm *certManagerClient) EnsureLatestVersion() error { return nil } - log.Info("Checking cert-manager version...") - currentVersion, _, shouldUpgrade, err := cm.shouldUpgrade(objs) + // Get the list of objects to install. + config, err := cm.configClient.CertManager().Get() + if err != nil { + return err + } + installObjs, err := cm.getManifestObjs(ctx, config) + if err != nil { + return err + } + + log.Info("Checking if cert-manager needs upgrade...") + currentVersion, shouldUpgrade, err := cm.shouldUpgrade(config.Version(), objs, installObjs) if err != nil { return err } @@ -256,43 +275,32 @@ func (cm *certManagerClient) EnsureLatestVersion() error { // Migrate CRs to latest CRD storage version, if necessary. // Note: We have to do this before cert-manager is deleted so conversion webhooks still work. - if err := cm.migrateCRDs(); err != nil { + if err := cm.migrateCRDs(ctx, installObjs); err != nil { return err } // delete the cert-manager version currently installed (because it should be upgraded); // NOTE: CRDs, and namespace are preserved in order to avoid deletion of user objects; // web-hooks are preserved to avoid a user attempting to CREATE a cert-manager resource while the upgrade is in progress. 
- log.Info("Deleting cert-manager", "Version", currentVersion) - if err := cm.deleteObjs(objs); err != nil { + log.Info("Deleting cert-manager", "version", currentVersion) + if err := cm.deleteObjs(ctx, objs); err != nil { return err } // Install cert-manager. - return cm.install() + return cm.install(ctx, config.Version(), installObjs) } -func (cm *certManagerClient) migrateCRDs() error { - config, err := cm.configClient.CertManager().Get() +func (cm *certManagerClient) migrateCRDs(ctx context.Context, installObj []unstructured.Unstructured) error { + c, err := cm.proxy.NewClient(ctx) if err != nil { return err } - // Gets the new cert-manager components from the repository. - objs, err := cm.getManifestObjs(config) - if err != nil { - return err - } - - c, err := cm.proxy.NewClient() - if err != nil { - return err - } - - return newCRDMigrator(c).Run(ctx, objs) + return NewCRDMigrator(c).Run(ctx, installObj) } -func (cm *certManagerClient) deleteObjs(objs []unstructured.Unstructured) error { +func (cm *certManagerClient) deleteObjs(ctx context.Context, objs []unstructured.Unstructured) error { deleteCertManagerBackoff := newWriteBackoff() for i := range objs { obj := objs[i] @@ -306,8 +314,8 @@ func (cm *certManagerClient) deleteObjs(objs []unstructured.Unstructured) error continue } - if err := retryWithExponentialBackoff(deleteCertManagerBackoff, func() error { - if err := cm.deleteObj(obj); err != nil { + if err := retryWithExponentialBackoff(ctx, deleteCertManagerBackoff, func(ctx context.Context) error { + if err := cm.deleteObj(ctx, obj); err != nil { // tolerate NotFound errors when deleting the test resources if apierrors.IsNotFound(err) { return nil @@ -322,16 +330,10 @@ func (cm *certManagerClient) deleteObjs(objs []unstructured.Unstructured) error return nil } -func (cm *certManagerClient) shouldUpgrade(objs []unstructured.Unstructured) (string, string, bool, error) { - config, err := cm.configClient.CertManager().Get() - if err != nil { - return "", "", false, err - } - - desiredVersion := config.Version() +func (cm *certManagerClient) shouldUpgrade(desiredVersion string, objs, installObjs []unstructured.Unstructured) (string, bool, error) { desiredSemVersion, err := semver.ParseTolerant(desiredVersion) if err != nil { - return "", "", false, errors.Wrapf(err, "failed to parse config version [%s] for cert-manager component", desiredVersion) + return "", false, errors.Wrapf(err, "failed to parse config version [%s] for cert-manager component", desiredVersion) } needUpgrade := false @@ -358,17 +360,23 @@ func (cm *certManagerClient) shouldUpgrade(objs []unstructured.Unstructured) (st objSemVersion, err := semver.ParseTolerant(objVersion) if err != nil { - return "", "", false, errors.Wrapf(err, "failed to parse version for cert-manager component %s/%s", obj.GetKind(), obj.GetName()) + return "", false, errors.Wrapf(err, "failed to parse version for cert-manager component %s/%s", obj.GetKind(), obj.GetName()) } c := version.Compare(objSemVersion, desiredSemVersion, version.WithBuildTags()) switch { case c < 0 || c == 2: - // if version < current or same version and different non-numeric build metadata, then upgrade + // The installed version is lower than the desired version or they are equal, but their metadata + // is different non-numerically (see version.WithBuildTags()). Upgrade is required. 
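For context on the comparison being rewritten here: standard semver precedence ignores build metadata, so a plain compare cannot distinguish v1.5.3+h4fd4 from v1.5.3+h4fd5; that is the gap version.WithBuildTags() closes (returning 2 for "equal version, different non-numeric build metadata"). A small standalone check with the blang/semver package this file already imports:

```go
package main

import (
	"fmt"

	"github.com/blang/semver/v4"
)

func main() {
	a := semver.MustParse("1.5.3+h4fd4")
	b := semver.MustParse("1.5.3+h4fd5")

	// Standard semver precedence ignores build metadata, so these compare
	// as equal even though their suffixes differ ...
	fmt.Println(a.Compare(b)) // 0

	// ... which is why the metadata has to be inspected separately.
	fmt.Println(a.Build, b.Build) // [h4fd4] [h4fd5]
}
```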
currentVersion = objVersion needUpgrade = true - case c >= 0: - // the installed version is greater than or equal to one required by clusterctl, so we are ok + case c == 0: + // The installed version is equal to the desired version. Upgrade is required only if the number + // of available objects and objects to install differ. This would act as a re-install. + currentVersion = objVersion + needUpgrade = len(objs) != len(installObjs) + case c > 0: + // The installed version is greater than the desired version. Upgrade is not required. currentVersion = objVersion } @@ -376,7 +384,7 @@ func (cm *certManagerClient) shouldUpgrade(objs []unstructured.Unstructured) (st break } } - return currentVersion, desiredVersion, needUpgrade, nil + return currentVersion, needUpgrade, nil } func (cm *certManagerClient) getWaitTimeout() time.Duration { @@ -394,18 +402,18 @@ func (cm *certManagerClient) getWaitTimeout() time.Duration { return timeoutDuration } -func (cm *certManagerClient) getManifestObjs(certManagerConfig config.CertManager) ([]unstructured.Unstructured, error) { +func (cm *certManagerClient) getManifestObjs(ctx context.Context, certManagerConfig config.CertManager) ([]unstructured.Unstructured, error) { // Given that cert manager components yaml are stored in a repository like providers components yaml, // we are using the same machinery to retrieve the file by using a fake provider object using // the cert manager repository url. certManagerFakeProvider := config.NewProvider("cert-manager", certManagerConfig.URL(), "") - certManagerRepository, err := cm.repositoryClientFactory(certManagerFakeProvider, cm.configClient) + certManagerRepository, err := cm.repositoryClientFactory(ctx, certManagerFakeProvider, cm.configClient) if err != nil { return nil, err } // Gets the cert-manager component yaml from the repository. - file, err := certManagerRepository.Components().Raw(repository.ComponentsOptions{ + file, err := certManagerRepository.Components().Raw(ctx, repository.ComponentsOptions{ Version: certManagerConfig.Version(), }) if err != nil { @@ -468,10 +476,10 @@ func getTestResourcesManifestObjs() ([]unstructured.Unstructured, error) { return objs, nil } -func (cm *certManagerClient) createObj(obj unstructured.Unstructured) error { +func (cm *certManagerClient) createObj(ctx context.Context, obj unstructured.Unstructured) error { log := logf.Log - c, err := cm.proxy.NewClient() + c, err := cm.proxy.NewClient(ctx) if err != nil { return err } @@ -508,11 +516,11 @@ func (cm *certManagerClient) createObj(obj unstructured.Unstructured) error { return nil } -func (cm *certManagerClient) deleteObj(obj unstructured.Unstructured) error { +func (cm *certManagerClient) deleteObj(ctx context.Context, obj unstructured.Unstructured) error { log := logf.Log log.V(5).Info("Deleting", logf.UnstructuredToValues(obj)...) - cl, err := cm.proxy.NewClient() + cl, err := cm.proxy.NewClient(ctx) if err != nil { return err } @@ -526,7 +534,7 @@ func (cm *certManagerClient) deleteObj(obj unstructured.Unstructured) error { // cert-manager API group. // If retry is true, the createObj call will be retried if it fails. Otherwise, the // 'create' operations will only be attempted once. -func (cm *certManagerClient) waitForAPIReady(_ context.Context, retry bool) error { +func (cm *certManagerClient) waitForAPIReady(ctx context.Context, retry bool) error { log := logf.Log // Waits for the cert-manager to be available. 
if retry { @@ -544,8 +552,8 @@ func (cm *certManagerClient) waitForAPIReady(_ context.Context, retry bool) erro // Create the Kubernetes object. // This is wrapped with a retry as the cert-manager API may not be available // yet, so we need to keep retrying until it is. - if err := cm.pollImmediateWaiter(waitCertManagerInterval, cm.getWaitTimeout(), func() (bool, error) { - if err := cm.createObj(o); err != nil { + if err := cm.pollImmediateWaiter(ctx, waitCertManagerInterval, cm.getWaitTimeout(), func(ctx context.Context) (bool, error) { + if err := cm.createObj(ctx, o); err != nil { // If retrying is disabled, return the error here. if !retry { return false, err @@ -560,8 +568,8 @@ func (cm *certManagerClient) waitForAPIReady(_ context.Context, retry bool) erro deleteCertManagerBackoff := newWriteBackoff() for i := range testObjs { obj := testObjs[i] - if err := retryWithExponentialBackoff(deleteCertManagerBackoff, func() error { - if err := cm.deleteObj(obj); err != nil { + if err := retryWithExponentialBackoff(ctx, deleteCertManagerBackoff, func(ctx context.Context) error { + if err := cm.deleteObj(ctx, obj); err != nil { // tolerate NotFound errors when deleting the test resources if apierrors.IsNotFound(err) { return nil diff --git a/cmd/clusterctl/client/cluster/cert_manager_test.go b/cmd/clusterctl/client/cluster/cert_manager_test.go index 480cbca885d5..cab681d02acb 100644 --- a/cmd/clusterctl/client/cluster/cert_manager_test.go +++ b/cmd/clusterctl/client/cluster/cert_manager_test.go @@ -17,6 +17,7 @@ limitations under the License. package cluster import ( + "context" "fmt" "testing" "time" @@ -60,8 +61,8 @@ var certManagerNamespaceYaml = []byte("apiVersion: v1\n" + func Test_getManifestObjs(t *testing.T) { g := NewWithT(t) - defaultConfigClient, err := config.New("", config.InjectReader(test.NewFakeReader().WithImageMeta(config.CertManagerImageComponent, "bar-repository.io", ""))) - g.Expect(err).NotTo(HaveOccurred()) + defaultConfigClient, err := config.New(context.Background(), "", config.InjectReader(test.NewFakeReader().WithImageMeta(config.CertManagerImageComponent, "bar-repository.io", ""))) + g.Expect(err).ToNot(HaveOccurred()) type fields struct { configClient config.Client @@ -108,7 +109,7 @@ func Test_getManifestObjs(t *testing.T) { name: "successfully gets the cert-manager components for a custom release", fields: fields{ configClient: func() config.Client { - configClient, err := config.New("", config.InjectReader(test.NewFakeReader().WithImageMeta(config.CertManagerImageComponent, "bar-repository.io", "").WithCertManager("", "v1.0.0", ""))) + configClient, err := config.New(context.Background(), "", config.InjectReader(test.NewFakeReader().WithImageMeta(config.CertManagerImageComponent, "bar-repository.io", "").WithCertManager("", "v1.0.0", ""))) g.Expect(err).ToNot(HaveOccurred()) return configClient }(), @@ -124,22 +125,24 @@ func Test_getManifestObjs(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + cm := &certManagerClient{ configClient: defaultConfigClient, - repositoryClientFactory: func(provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) { - return repository.New(provider, configClient, repository.InjectRepository(tt.fields.repository)) + repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, _ ...repository.Option) (repository.Client, error) { + return repository.New(ctx, provider, configClient, 
repository.InjectRepository(tt.fields.repository)) }, } certManagerConfig, err := cm.configClient.CertManager().Get() g.Expect(err).ToNot(HaveOccurred()) - got, err := cm.getManifestObjs(certManagerConfig) + got, err := cm.getManifestObjs(ctx, certManagerConfig) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) for i := range got { o := &got[i] @@ -165,7 +168,7 @@ func Test_getManifestObjs(t *testing.T) { } func Test_GetTimeout(t *testing.T) { - pollImmediateWaiter := func(interval, timeout time.Duration, condition wait.ConditionFunc) error { + pollImmediateWaiter := func(context.Context, time.Duration, time.Duration, wait.ConditionWithContextFunc) error { return nil } @@ -193,6 +196,7 @@ func Test_GetTimeout(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + cm := newCertManagerClient(tt.config, nil, nil, pollImmediateWaiter) tm := cm.getWaitTimeout() @@ -207,16 +211,17 @@ func Test_shouldUpgrade(t *testing.T) { objs []unstructured.Unstructured } tests := []struct { - name string - configVersion string - args args - wantFromVersion string - wantToVersion string - want bool - wantErr bool + name string + configVersion string + args args + wantFromVersion string + hasDiffInstallObjs bool + want bool + wantErr bool }{ { - name: "Version is not defined (e.g. cluster created with clusterctl < v0.3.9), should upgrade", + name: "Version is not defined (e.g. cluster created with clusterctl < v0.3.9), should upgrade", + configVersion: config.CertManagerDefaultVersion, args: args{ objs: []unstructured.Unstructured{ { @@ -225,12 +230,12 @@ func Test_shouldUpgrade(t *testing.T) { }, }, wantFromVersion: "v0.11.0", - wantToVersion: config.CertManagerDefaultVersion, want: true, wantErr: false, }, { - name: "Version is equal, should not upgrade", + name: "Version is equal, should not upgrade", + configVersion: config.CertManagerDefaultVersion, args: args{ objs: []unstructured.Unstructured{ { @@ -245,7 +250,6 @@ func Test_shouldUpgrade(t *testing.T) { }, }, wantFromVersion: config.CertManagerDefaultVersion, - wantToVersion: config.CertManagerDefaultVersion, want: false, wantErr: false, }, @@ -266,7 +270,6 @@ func Test_shouldUpgrade(t *testing.T) { }, }, wantFromVersion: "v1.5.3", - wantToVersion: "v1.5.3+h4fd4", want: true, wantErr: false, }, @@ -287,7 +290,6 @@ func Test_shouldUpgrade(t *testing.T) { }, }, wantFromVersion: "v1.5.3+h4fd5", - wantToVersion: "v1.5.3+h4fd4", want: true, wantErr: false, }, @@ -308,7 +310,6 @@ func Test_shouldUpgrade(t *testing.T) { }, }, wantFromVersion: "v1.5.3+h4fd5", - wantToVersion: "v1.5.3+h4fd5", want: false, wantErr: false, }, @@ -329,7 +330,6 @@ func Test_shouldUpgrade(t *testing.T) { }, }, wantFromVersion: "v1.5.3+build.2", - wantToVersion: "v1.5.3+build.1", want: false, wantErr: false, }, @@ -350,12 +350,33 @@ func Test_shouldUpgrade(t *testing.T) { }, }, wantFromVersion: "v1.5.3+build.2", - wantToVersion: "v1.5.3+build.3", want: true, wantErr: false, }, { - name: "Version is older, should upgrade", + name: "Version is equal, but should upgrade because objects to install are a different size", + configVersion: config.CertManagerDefaultVersion, + args: args{ + objs: []unstructured.Unstructured{ + { + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "annotations": map[string]interface{}{ + clusterctlv1.CertManagerVersionAnnotation: config.CertManagerDefaultVersion, + }, + }, + }, + }, + }, + }, + wantFromVersion: 
config.CertManagerDefaultVersion, + hasDiffInstallObjs: true, + want: true, + wantErr: false, + }, + { + name: "Version is older, should upgrade", + configVersion: config.CertManagerDefaultVersion, args: args{ objs: []unstructured.Unstructured{ { @@ -370,12 +391,12 @@ func Test_shouldUpgrade(t *testing.T) { }, }, wantFromVersion: "v0.11.0", - wantToVersion: config.CertManagerDefaultVersion, want: true, wantErr: false, }, { - name: "Version is newer, should not upgrade", + name: "Version is newer, should not upgrade", + configVersion: config.CertManagerDefaultVersion, args: args{ objs: []unstructured.Unstructured{ { @@ -390,12 +411,13 @@ func Test_shouldUpgrade(t *testing.T) { }, }, wantFromVersion: "v100.0.0", - wantToVersion: config.CertManagerDefaultVersion, want: false, wantErr: false, }, + { - name: "Endpoint are ignored", + name: "Endpoint are ignored", + configVersion: config.CertManagerDefaultVersion, args: args{ objs: []unstructured.Unstructured{ { @@ -411,7 +433,6 @@ func Test_shouldUpgrade(t *testing.T) { }, }, wantFromVersion: "", - wantToVersion: config.CertManagerDefaultVersion, want: false, wantErr: false, }, @@ -419,14 +440,24 @@ func Test_shouldUpgrade(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + proxy := test.NewFakeProxy() fakeConfigClient := newFakeConfig().WithCertManager("", tt.configVersion, "") - pollImmediateWaiter := func(interval, timeout time.Duration, condition wait.ConditionFunc) error { + pollImmediateWaiter := func(context.Context, time.Duration, time.Duration, wait.ConditionWithContextFunc) error { return nil } cm := newCertManagerClient(fakeConfigClient, nil, proxy, pollImmediateWaiter) - fromVersion, toVersion, got, err := cm.shouldUpgrade(tt.args.objs) + // By default, make the installed and to-be-installed objects the same, but if hasDiffInstallObjs is set, + // just append an empty unstructured object at the end to make them different. + installObjs := tt.args.objs + if tt.hasDiffInstallObjs { + installObjs = make([]unstructured.Unstructured, len(tt.args.objs)) + copy(installObjs, tt.args.objs) + installObjs = append(installObjs, unstructured.Unstructured{}) + } + + fromVersion, got, err := cm.shouldUpgrade(tt.configVersion, tt.args.objs, installObjs) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -435,7 +466,6 @@ func Test_shouldUpgrade(t *testing.T) { g.Expect(got).To(Equal(tt.want)) g.Expect(fromVersion).To(Equal(tt.wantFromVersion)) - g.Expect(toVersion).To(Equal(tt.wantToVersion)) }) } } @@ -560,16 +590,18 @@ func Test_certManagerClient_deleteObjs(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + proxy := test.NewFakeProxy().WithObjs(tt.fields.objs...) 
cm := &certManagerClient{ pollImmediateWaiter: fakePollImmediateWaiter, proxy: proxy, } - objBefore, err := proxy.ListResources(map[string]string{clusterctlv1.ClusterctlCoreLabel: clusterctlv1.ClusterctlCoreLabelCertManagerValue}) + objBefore, err := proxy.ListResources(ctx, map[string]string{clusterctlv1.ClusterctlCoreLabel: clusterctlv1.ClusterctlCoreLabelCertManagerValue}) g.Expect(err).ToNot(HaveOccurred()) - err = cm.deleteObjs(objBefore) + err = cm.deleteObjs(ctx, objBefore) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -587,10 +619,10 @@ func Test_certManagerClient_deleteObjs(t *testing.T) { } } - cl, err := proxy.NewClient() + cl, err := proxy.NewClient(ctx) g.Expect(err).ToNot(HaveOccurred()) - err = cl.Get(ctx, client.ObjectKeyFromObject(obj), obj) + err = cl.Get(context.Background(), client.ObjectKeyFromObject(obj), obj) switch objShouldStillExist { case true: g.Expect(err).ToNot(HaveOccurred()) @@ -704,17 +736,29 @@ func Test_certManagerClient_PlanUpgrade(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + proxy := test.NewFakeProxy().WithObjs(tt.objs...) fakeConfigClient := newFakeConfig() - pollImmediateWaiter := func(interval, timeout time.Duration, condition wait.ConditionFunc) error { + pollImmediateWaiter := func(context.Context, time.Duration, time.Duration, wait.ConditionWithContextFunc) error { return nil } - cm := newCertManagerClient(fakeConfigClient, nil, proxy, pollImmediateWaiter) - actualPlan, err := cm.PlanUpgrade() + // Prepare a fake memory repo from which getManifestObjs(), called from PlanUpgrade() will fetch to-be-installed objects. + fakeRepositoryClientFactory := func(ctx context.Context, provider config.Provider, configClient config.Client, _ ...repository.Option) (repository.Client, error) { + repo := repository.NewMemoryRepository(). + WithPaths("root", "components.yaml"). + WithDefaultVersion(config.CertManagerDefaultVersion). 
+ WithFile(config.CertManagerDefaultVersion, "components.yaml", certManagerDeploymentYaml) + return repository.New(ctx, provider, configClient, repository.InjectRepository(repo)) + } + + cm := newCertManagerClient(fakeConfigClient, fakeRepositoryClientFactory, proxy, pollImmediateWaiter) + + actualPlan, err := cm.PlanUpgrade(ctx) if tt.expectErr { g.Expect(err).To(HaveOccurred()) - g.Expect(actualPlan).To(Equal(CertManagerUpgradePlan{})) + g.Expect(actualPlan).To(BeComparableTo(CertManagerUpgradePlan{})) return } g.Expect(err).ToNot(HaveOccurred()) @@ -754,7 +798,7 @@ func Test_certManagerClient_EnsureLatestVersion(t *testing.T) { proxy: tt.fields.proxy, } - err := cm.EnsureLatestVersion() + err := cm.EnsureLatestVersion(context.Background()) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -767,7 +811,7 @@ func Test_certManagerClient_EnsureLatestVersion(t *testing.T) { func newFakeConfig() *fakeConfigClient { fakeReader := test.NewFakeReader() - client, _ := config.New("fake-config", config.InjectReader(fakeReader)) + client, _ := config.New(context.Background(), "fake-config", config.InjectReader(fakeReader)) return &fakeConfigClient{ fakeReader: fakeReader, internalclient: client, diff --git a/cmd/clusterctl/client/cluster/client.go b/cmd/clusterctl/client/cluster/client.go index bd34d912581b..a6c0e3b87868 100644 --- a/cmd/clusterctl/client/cluster/client.go +++ b/cmd/clusterctl/client/cluster/client.go @@ -29,10 +29,6 @@ import ( logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log" ) -var ( - ctx = context.TODO() -) - // Kubeconfig is a type that specifies inputs related to the actual // kubeconfig. type Kubeconfig struct { @@ -89,7 +85,7 @@ type Client interface { } // PollImmediateWaiter tries a condition func until it returns true, an error, or the timeout is reached. -type PollImmediateWaiter func(interval, timeout time.Duration, condition wait.ConditionFunc) error +type PollImmediateWaiter func(ctx context.Context, interval, timeout time.Duration, condition wait.ConditionWithContextFunc) error // clusterClient implements Client. type clusterClient struct { @@ -102,7 +98,7 @@ type clusterClient struct { } // RepositoryClientFactory defines a function that returns a new repository.Client. -type RepositoryClientFactory func(provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) +type RepositoryClientFactory func(ctx context.Context, provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) // ensure clusterClient implements Client. var _ Client = &clusterClient{} @@ -214,22 +210,24 @@ func newClusterClient(kubeconfig Kubeconfig, configClient config.Client, options // if there is an injected PollImmediateWaiter, use it, otherwise use the default one if client.pollImmediateWaiter == nil { - client.pollImmediateWaiter = wait.PollImmediate + client.pollImmediateWaiter = func(ctx context.Context, interval, timeout time.Duration, condition wait.ConditionWithContextFunc) error { + return wait.PollUntilContextTimeout(ctx, interval, timeout, true, condition) + } } return client } // retryWithExponentialBackoff repeats an operation until it passes or the exponential backoff times out. 
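The default waiter above now wraps the context-aware wait.PollUntilContextTimeout, and retryWithExponentialBackoff, redefined next, gets the same treatment via wait.ExponentialBackoffWithContext. A standalone sketch of the polling pattern the new waiter uses — interval, timeout, and the condition are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	attempts := 0
	// immediate=true mirrors the replaced wait.PollImmediate behavior:
	// the condition runs once before the first sleep.
	err := wait.PollUntilContextTimeout(ctx, 200*time.Millisecond, 2*time.Second, true,
		func(ctx context.Context) (bool, error) {
			attempts++
			return attempts >= 3, nil // succeed on the third poll
		})
	fmt.Println(attempts, err) // 3 <nil>
}
```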
-func retryWithExponentialBackoff(opts wait.Backoff, operation func() error) error { +func retryWithExponentialBackoff(ctx context.Context, opts wait.Backoff, operation func(ctx context.Context) error) error { log := logf.Log i := 0 - err := wait.ExponentialBackoff(opts, func() (bool, error) { + err := wait.ExponentialBackoffWithContext(ctx, opts, func(ctx context.Context) (bool, error) { i++ - if err := operation(); err != nil { + if err := operation(ctx); err != nil { if i < opts.Steps { - log.V(5).Info("Retrying with backoff", "Cause", err.Error()) + log.V(5).Info("Retrying with backoff", "cause", err.Error()) return false, nil } return false, err diff --git a/cmd/clusterctl/client/cluster/components.go b/cmd/clusterctl/client/cluster/components.go index 3f2334610483..bf5a7be3870b 100644 --- a/cmd/clusterctl/client/cluster/components.go +++ b/cmd/clusterctl/client/cluster/components.go @@ -17,16 +17,20 @@ limitations under the License. package cluster import ( + "context" "fmt" "strings" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -54,17 +58,20 @@ type DeleteOptions struct { // ComponentsClient has methods to work with provider components in the cluster. type ComponentsClient interface { // Create creates the provider components in the management cluster. - Create(objs []unstructured.Unstructured) error + Create(ctx context.Context, objs []unstructured.Unstructured) error // Delete deletes the provider components from the management cluster. // The operation is designed to prevent accidental deletion of user created objects, so // it is required to explicitly opt-in for the deletion of the namespace where the provider components are hosted // and for the deletion of the provider's CRDs. - Delete(options DeleteOptions) error + Delete(ctx context.Context, options DeleteOptions) error // DeleteWebhookNamespace deletes the core provider webhook namespace (eg. capi-webhook-system). // This is required when upgrading to v1alpha4 where webhooks are included in the controller itself. - DeleteWebhookNamespace() error + DeleteWebhookNamespace(ctx context.Context) error + + // ValidateNoObjectsExist checks if custom resources of the custom resource definitions exist and returns an error if so. + ValidateNoObjectsExist(ctx context.Context, provider clusterctlv1.Provider) error } // providerComponents implements ComponentsClient. @@ -72,15 +79,15 @@ type providerComponents struct { proxy Proxy } -func (p *providerComponents) Create(objs []unstructured.Unstructured) error { +func (p *providerComponents) Create(ctx context.Context, objs []unstructured.Unstructured) error { createComponentObjectBackoff := newWriteBackoff() for i := range objs { obj := objs[i] // Create the Kubernetes object. // Nb. The operation is wrapped in a retry loop to make Create more resilient to unexpected conditions. 
- if err := retryWithExponentialBackoff(createComponentObjectBackoff, func() error { - return p.createObj(obj) + if err := retryWithExponentialBackoff(ctx, createComponentObjectBackoff, func(ctx context.Context) error { + return p.createObj(ctx, obj) }); err != nil { return err } @@ -89,9 +96,9 @@ func (p *providerComponents) Create(objs []unstructured.Unstructured) error { return nil } -func (p *providerComponents) createObj(obj unstructured.Unstructured) error { +func (p *providerComponents) createObj(ctx context.Context, obj unstructured.Unstructured) error { log := logf.Log - c, err := p.proxy.NewClient() + c, err := p.proxy.NewClient(ctx) if err != nil { return err } @@ -127,9 +134,9 @@ return nil } -func (p *providerComponents) Delete(options DeleteOptions) error { +func (p *providerComponents) Delete(ctx context.Context, options DeleteOptions) error { log := logf.Log - log.Info("Deleting", "Provider", options.Provider.Name, "Version", options.Provider.Version, "Namespace", options.Provider.Namespace) + log.Info("Deleting", "Provider", klog.KObj(&options.Provider), "providerVersion", options.Provider.Version) // Fetch all the components belonging to a provider. // We want that the delete operation is able to clean-up everything. @@ -139,7 +146,7 @@ } namespaces := []string{options.Provider.Namespace} - resources, err := p.proxy.ListResources(labels, namespaces...) + resources, err := p.proxy.ListResources(ctx, labels, namespaces...) if err != nil { return err } @@ -198,7 +205,7 @@ } // Delete all the provider components. - cs, err := p.proxy.NewClient() + cs, err := p.proxy.NewClient(ctx) if err != nil { return err } @@ -216,7 +223,7 @@ // Otherwise delete the object log.V(5).Info("Deleting", logf.UnstructuredToValues(obj)...) deleteBackoff := newWriteBackoff() - if err := retryWithExponentialBackoff(deleteBackoff, func() error { + if err := retryWithExponentialBackoff(ctx, deleteBackoff, func(ctx context.Context) error { if err := cs.Delete(ctx, &obj); err != nil { if apierrors.IsNotFound(err) { // Tolerate IsNotFound error that might happen because we are not enforcing a deletion order @@ -234,13 +241,13 @@ return kerrors.NewAggregate(errList) } -func (p *providerComponents) DeleteWebhookNamespace() error { +func (p *providerComponents) DeleteWebhookNamespace(ctx context.Context) error { const webhookNamespaceName = "capi-webhook-system" log := logf.Log log.V(5).Info("Deleting", "namespace", webhookNamespaceName) - c, err := p.proxy.NewClient() + c, err := p.proxy.NewClient(ctx) if err != nil { return err } @@ -256,6 +263,59 @@ return nil } +func (p *providerComponents) ValidateNoObjectsExist(ctx context.Context, provider clusterctlv1.Provider) error { + log := logf.Log + log.Info("Checking for CRs", "Provider", klog.KObj(&provider), "providerVersion", provider.Version) + + proxyClient, err := p.proxy.NewClient(ctx) + if err != nil { + return err + } + + // Fetch all the CRDs belonging to the provider. + // We want to verify no custom resources are left before the provider CRDs get deleted.
+ labels := map[string]string{ + clusterctlv1.ClusterctlLabel: "", + clusterv1.ProviderNameLabel: provider.ManifestLabel(), + } + + customResources := &apiextensionsv1.CustomResourceDefinitionList{} + if err := proxyClient.List(ctx, customResources, client.MatchingLabels(labels)); err != nil { + return err + } + + // Check each provider CRD for existing custom resources + crsHavingObjects := []string{} + for _, crd := range customResources.Items { + crd := crd + storageVersion, err := storageVersionForCRD(&crd) + if err != nil { + return err + } + + list := &unstructured.UnstructuredList{} + list.SetGroupVersionKind(schema.GroupVersionKind{ + Group: crd.Spec.Group, + Version: storageVersion, + Kind: crd.Spec.Names.ListKind, + }) + + if err := proxyClient.List(ctx, list); err != nil { + return err + } + + if len(list.Items) > 0 { + crsHavingObjects = append(crsHavingObjects, crd.Kind) + } + } + + if len(crsHavingObjects) > 0 { + return fmt.Errorf("found existing objects for provider CRDs %q: [%s]. Please delete these objects first before running clusterctl delete with --include-crd", provider.GetName(), strings.Join(crsHavingObjects, ", ")) + } + + return nil +} + // newComponentsClient returns a providerComponents. func newComponentsClient(proxy Proxy) *providerComponents { return &providerComponents{ diff --git a/cmd/clusterctl/client/cluster/components_test.go b/cmd/clusterctl/client/cluster/components_test.go index a3fa9dd7529b..1c430f3d350f 100644 --- a/cmd/clusterctl/client/cluster/components_test.go +++ b/cmd/clusterctl/client/cluster/components_test.go @@ -17,6 +17,7 @@ limitations under the License. package cluster import ( + "context" "fmt" "testing" @@ -24,6 +25,7 @@ import ( . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -256,7 +258,7 @@ func Test_providerComponents_Delete(t *testing.T) { c := newComponentsClient(proxy) - err := c.Delete(DeleteOptions{ + err := c.Delete(context.Background(), DeleteOptions{ Provider: tt.args.provider, IncludeNamespace: tt.args.includeNamespace, IncludeCRDs: tt.args.includeCRD, @@ -267,10 +269,10 @@ return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) - cs, err := proxy.NewClient() - g.Expect(err).NotTo(HaveOccurred()) + cs, err := proxy.NewClient(context.Background()) + g.Expect(err).ToNot(HaveOccurred()) for _, want := range tt.wantDiff { obj := &unstructured.Unstructured{} @@ -282,7 +284,7 @@ Name: want.object.Name, } - err := cs.Get(ctx, key, obj) + err := cs.Get(context.Background(), key, obj) if err != nil && !apierrors.IsNotFound(err) { t.Fatalf("Failed to get %v from the cluster: %v", key, err) } @@ -321,19 +323,19 @@ func Test_providerComponents_DeleteCoreProviderWebhookNamespace(t *testing.T) { } proxy := test.NewFakeProxy().WithObjs(initObjs...)
- proxyClient, _ := proxy.NewClient() + proxyClient, _ := proxy.NewClient(context.Background()) var nsList corev1.NamespaceList // assert length before deleting - _ = proxyClient.List(ctx, &nsList) + _ = proxyClient.List(context.Background(), &nsList) g.Expect(nsList.Items).Should(HaveLen(1)) c := newComponentsClient(proxy) - err := c.DeleteWebhookNamespace() + err := c.DeleteWebhookNamespace(context.Background()) g.Expect(err).To(Not(HaveOccurred())) // assert length after deleting - _ = proxyClient.List(ctx, &nsList) + _ = proxyClient.List(context.Background(), &nsList) g.Expect(nsList.Items).Should(BeEmpty()) }) } @@ -444,20 +446,20 @@ func Test_providerComponents_Create(t *testing.T) { for _, obj := range tt.args.objectsToCreate { uns := &unstructured.Unstructured{} if err := scheme.Scheme.Convert(obj, uns, nil); err != nil { - g.Expect(fmt.Errorf("%v %v could not be converted to unstructured", err.Error(), obj)).NotTo(HaveOccurred()) + g.Expect(fmt.Errorf("%v %v could not be converted to unstructured", err.Error(), obj)).ToNot(HaveOccurred()) } unstructuredObjectsToCreate = append(unstructuredObjectsToCreate, *uns) } - err := c.Create(unstructuredObjectsToCreate) + err := c.Create(context.Background(), unstructuredObjectsToCreate) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) - cs, err := proxy.NewClient() - g.Expect(err).NotTo(HaveOccurred()) + cs, err := proxy.NewClient(context.Background()) + g.Expect(err).ToNot(HaveOccurred()) for _, item := range tt.want { obj := &unstructured.Unstructured{} @@ -468,7 +470,7 @@ func Test_providerComponents_Create(t *testing.T) { Name: item.GetName(), } - err := cs.Get(ctx, key, obj) + err := cs.Get(context.Background(), key, obj) if err != nil && !apierrors.IsNotFound(err) { t.Fatalf("Failed to get %v from the cluster: %v", key, err) @@ -479,14 +481,14 @@ func Test_providerComponents_Create(t *testing.T) { if item.GetObjectKind().GroupVersionKind().Kind == "Pod" { p1, okp1 := item.(*corev1.Pod) if !(okp1) { - g.Expect(fmt.Errorf("%v %v could retrieve pod", err.Error(), obj)).NotTo(HaveOccurred()) + g.Expect(fmt.Errorf("%v %v could retrieve pod", err.Error(), obj)).ToNot(HaveOccurred()) } p2 := &corev1.Pod{} if err := scheme.Scheme.Convert(obj, p2, nil); err != nil { - g.Expect(fmt.Errorf("%v %v could not be converted to unstructured", err.Error(), obj)).NotTo(HaveOccurred()) + g.Expect(fmt.Errorf("%v %v could not be converted to unstructured", err.Error(), obj)).ToNot(HaveOccurred()) } if len(p1.Spec.Containers) == 0 || len(p2.Spec.Containers) == 0 { - g.Expect(fmt.Errorf("%v %v could not be converted to unstructured", err.Error(), obj)).NotTo(HaveOccurred()) + g.Expect(fmt.Errorf("%v %v could not be converted to unstructured", err.Error(), obj)).ToNot(HaveOccurred()) } g.Expect(p1.Spec.Containers[0].Image).To(Equal(p2.Spec.Containers[0].Image), cmp.Diff(obj.GetNamespace(), item.GetNamespace())) } @@ -494,3 +496,78 @@ func Test_providerComponents_Create(t *testing.T) { }) } } + +func Test_providerComponents_ValidateNoObjectsExist(t *testing.T) { + labels := map[string]string{ + clusterv1.ProviderNameLabel: "infrastructure-infra", + } + + crd := &apiextensionsv1.CustomResourceDefinition{ + TypeMeta: metav1.TypeMeta{ + Kind: "CustomResourceDefinition", + APIVersion: apiextensionsv1.SchemeGroupVersion.Identifier(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "crd1", + Labels: labels, + }, + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: 
"some.group", + Names: apiextensionsv1.CustomResourceDefinitionNames{ + ListKind: "SomeCRDList", + Kind: "SomeCRD", + }, + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + {Name: "v1", Storage: true}, + }, + }, + } + crd.ObjectMeta.Labels[clusterctlv1.ClusterctlLabel] = "" + + cr := &unstructured.Unstructured{} + cr.SetAPIVersion("some.group/v1") + cr.SetKind("SomeCRD") + cr.SetName("cr1") + + tests := []struct { + name string + provider clusterctlv1.Provider + initObjs []client.Object + wantErr bool + }{ + { + name: "No objects exist", + provider: clusterctlv1.Provider{ObjectMeta: metav1.ObjectMeta{Name: "infrastructure-infra", Namespace: "ns1"}, ProviderName: "infra", Type: string(clusterctlv1.InfrastructureProviderType)}, + initObjs: []client.Object{}, + wantErr: false, + }, + { + name: "CRD exists but no objects", + provider: clusterctlv1.Provider{ObjectMeta: metav1.ObjectMeta{Name: "infrastructure-infra", Namespace: "ns1"}, ProviderName: "infra", Type: string(clusterctlv1.InfrastructureProviderType)}, + initObjs: []client.Object{ + crd, + }, + wantErr: false, + }, + { + name: "CRD exists but and also objects", + provider: clusterctlv1.Provider{ObjectMeta: metav1.ObjectMeta{Name: "infrastructure-infra", Namespace: "ns1"}, ProviderName: "infra", Type: string(clusterctlv1.InfrastructureProviderType)}, + initObjs: []client.Object{ + crd, + cr, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + proxy := test.NewFakeProxy().WithObjs(tt.initObjs...) + + c := newComponentsClient(proxy) + + if err := c.ValidateNoObjectsExist(context.Background(), tt.provider); (err != nil) != tt.wantErr { + t.Errorf("providerComponents.ValidateNoObjectsExist() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/cmd/clusterctl/client/cluster/crd_migration.go b/cmd/clusterctl/client/cluster/crd_migration.go index b13bfc789d8a..08a62a85df76 100644 --- a/cmd/clusterctl/client/cluster/crd_migration.go +++ b/cmd/clusterctl/client/cluster/crd_migration.go @@ -29,12 +29,19 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/cluster-api/cmd/clusterctl/internal/scheme" logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log" ) +// CRDMigrator interface defines methods for migrating CRs to the storage version of new CRDs. +type CRDMigrator interface { + Run(ctx context.Context, objs []unstructured.Unstructured) error +} + // crdMigrator migrates CRs to the storage version of new CRDs. // This is necessary when the new CRD drops a version which // was previously used as a storage version. @@ -42,8 +49,8 @@ type crdMigrator struct { Client client.Client } -// newCRDMigrator creates a new CRD migrator. -func newCRDMigrator(client client.Client) *crdMigrator { +// NewCRDMigrator creates a new CRD migrator. +func NewCRDMigrator(client client.Client) CRDMigrator { return &crdMigrator{ Client: client, } @@ -71,8 +78,8 @@ func (m *crdMigrator) Run(ctx context.Context, objs []unstructured.Unstructured) } // run migrates CRs of a new CRD. -// This is necessary when the new CRD drops a version which -// was previously used as a storage version. +// This is necessary when the new CRD drops or stops serving +// a version which was previously used as a storage version. 
func (m *crdMigrator) run(ctx context.Context, newCRD *apiextensionsv1.CustomResourceDefinition) (bool, error) { log := logf.Log @@ -84,7 +91,7 @@ func (m *crdMigrator) run(ctx context.Context, newCRD *apiextensionsv1.CustomRes // Get the current CRD. currentCRD := &apiextensionsv1.CustomResourceDefinition{} - if err := retryWithExponentialBackoff(newReadBackoff(), func() error { + if err := retryWithExponentialBackoff(ctx, newReadBackoff(), func(ctx context.Context) error { return m.Client.Get(ctx, client.ObjectKeyFromObject(newCRD), currentCRD) }); err != nil { // Return if the CRD doesn't exist yet. We only have to migrate if the CRD exists already. @@ -106,24 +113,22 @@ func (m *crdMigrator) run(ctx context.Context, newCRD *apiextensionsv1.CustomRes } currentStatusStoredVersions := sets.Set[string]{}.Insert(currentCRD.Status.StoredVersions...) - - // If the new CRD still contains all current stored versions, nothing to do - // as no previous storage version will be dropped. - if newVersions.HasAll(currentStatusStoredVersions.UnsortedList()...) { - log.V(2).Info("CRD migration check passed", "name", newCRD.Name) + // If the old CRD only contains its current storageVersion as storedVersion, + // nothing to do as all objects are already on the current storageVersion. + // Note: We want to migrate objects to new storage versions as soon as possible + // to prevent unnecessary conversion webhook calls. + if currentStatusStoredVersions.Len() == 1 && currentCRD.Status.StoredVersions[0] == currentStorageVersion { + log.V(2).Info("CRD migration check passed", "CustomResourceDefinition", klog.KObj(newCRD)) return false, nil } - // Otherwise a version that has been used as storage version will be dropped, so it is necessary to migrate all the - // objects and drop the storage version from the current CRD status before installing the new CRD. - // Ref https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#writing-reading-and-updating-versioned-customresourcedefinition-objects // Note: We are simply migrating all CR objects independent of the version in which they are actually stored in etcd. // This way we can make sure that all CR objects are now stored in the current storage version. // Alternatively, we would have to figure out which objects are stored in which version but this information is not // exposed by the apiserver. 
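The new check above boils down to set arithmetic on status.storedVersions: migrate unless the stored versions collapse to exactly the current storage version. A standalone illustration with the generic set type from k8s.io/apimachinery that this file uses (the literal versions are examples):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	stored := sets.New[string]("v1alpha1", "v1beta1") // status.storedVersions
	storage := "v1beta1"                              // current storage version

	// Migration is needed unless the stored versions are exactly {storage}.
	needsMigration := !(stored.Len() == 1 && stored.Has(storage))
	fmt.Println(needsMigration) // true

	// After migrating all CRs, everything except the storage version can be
	// dropped from status.storedVersions.
	toDelete := stored.Clone().Delete(storage)
	fmt.Println(sets.List(toDelete)) // [v1alpha1]
}
```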
- storedVersionsToDelete := currentStatusStoredVersions.Difference(newVersions) - storedVersionsToPreserve := currentStatusStoredVersions.Intersection(newVersions) - log.Info("CR migration required", "kind", newCRD.Spec.Names.Kind, "storedVersionsToDelete", strings.Join(sets.List(storedVersionsToDelete), ","), "storedVersionsToPreserve", strings.Join(sets.List(storedVersionsToPreserve), ",")) + // Ref https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#writing-reading-and-updating-versioned-customresourcedefinition-objects + storedVersionsToDelete := currentStatusStoredVersions.Delete(currentStorageVersion) + log.Info("CR migration required", "kind", newCRD.Spec.Names.Kind, "storedVersionsToDelete", strings.Join(sets.List(storedVersionsToDelete), ","), "storedVersionToPreserve", currentStorageVersion) if err := m.migrateResourcesForCRD(ctx, currentCRD, currentStorageVersion); err != nil { return false, err @@ -137,8 +142,8 @@ func (m *crdMigrator) run(ctx context.Context, newCRD *apiextensionsv1.CustomRes } func (m *crdMigrator) migrateResourcesForCRD(ctx context.Context, crd *apiextensionsv1.CustomResourceDefinition, currentStorageVersion string) error { - log := logf.Log - log.Info("Migrating CRs, this operation may take a while...", "kind", crd.Spec.Names.Kind) + log := logf.Log.WithValues("CustomResourceDefinition", klog.KObj(crd)) + log.Info("Migrating CRs, this operation may take a while...") list := &unstructured.UnstructuredList{} list.SetGroupVersionKind(schema.GroupVersionKind{ @@ -149,7 +154,7 @@ func (m *crdMigrator) migrateResourcesForCRD(ctx context.Context, crd *apiextens var i int for { - if err := retryWithExponentialBackoff(newReadBackoff(), func() error { + if err := retryWithExponentialBackoff(ctx, newCRDMigrationBackoff(), func(ctx context.Context) error { return m.Client.List(ctx, list, client.Continue(list.GetContinue())) }); err != nil { return errors.Wrapf(err, "failed to list %q", list.GetKind()) @@ -159,7 +164,7 @@ func (m *crdMigrator) migrateResourcesForCRD(ctx context.Context, crd *apiextens obj := list.Items[i] log.V(5).Info("Migrating", logf.UnstructuredToValues(obj)...) 
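The retry sites in this function now use newCRDMigrationBackoff(), defined a little further down, instead of the generic read/write backoffs; see the sketch here for its schedule. Ignoring jitter, the cumulative delays can be reproduced by stepping a wait.Backoff with the same parameters:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Same parameters as newCRDMigrationBackoff, minus jitter for readability.
	b := wait.Backoff{Duration: 250 * time.Millisecond, Factor: 1.4, Steps: 17}

	elapsed := time.Duration(0)
	for i := 0; i < 17; i++ {
		// Step returns the next delay and advances the backoff state.
		fmt.Printf("attempt %2d starts after ~%v\n", i+1, elapsed.Round(50*time.Millisecond))
		elapsed += b.Step()
	}
	// Prints cumulative starts of 0s, 250ms, 600ms, 1.1s, ... ~97s, ~135s,
	// matching the schedule documented on newCRDMigrationBackoff.
}
```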
- if err := retryWithExponentialBackoff(newWriteBackoff(), func() error { + if err := retryWithExponentialBackoff(ctx, newCRDMigrationBackoff(), func(ctx context.Context) error { return handleMigrateErr(m.Client.Update(ctx, &obj)) }); err != nil { return errors.Wrapf(err, "failed to migrate %s/%s", obj.GetNamespace(), obj.GetName()) @@ -178,13 +183,13 @@ } } - log.V(2).Info(fmt.Sprintf("CR migration completed: migrated %d objects", i), "kind", crd.Spec.Names.Kind) + log.V(2).Info(fmt.Sprintf("CR migration completed: migrated %d objects", i)) return nil } func (m *crdMigrator) patchCRDStoredVersions(ctx context.Context, crd *apiextensionsv1.CustomResourceDefinition, currentStorageVersion string) error { crd.Status.StoredVersions = []string{currentStorageVersion} - if err := retryWithExponentialBackoff(newWriteBackoff(), func() error { + if err := retryWithExponentialBackoff(ctx, newWriteBackoff(), func(ctx context.Context) error { return m.Client.Status().Update(ctx, crd) }); err != nil { return errors.Wrapf(err, "failed to update status.storedVersions for CRD %q", crd.Name) @@ -222,3 +227,20 @@ } return "", errors.Errorf("could not find storage version for CRD %q", crd.Name) } + +// newCRDMigrationBackoff creates a new API Machinery backoff parameter set suitable for use with crd migration operations. +// Clusterctl upgrades cert-manager right before doing CRD migration. This may lead to rollout of new certificates. +// The time between new certificate creation + injection into objects (CRD, Webhooks) and the new secrets getting propagated +// to the controller can be 60-90s, because the kubelet only periodically syncs secret contents to pods. +// During this timespan, conversion-, validating- or mutating-webhooks may be unavailable and cause a failure. +func newCRDMigrationBackoff() wait.Backoff { + // Return an exponential backoff configuration which returns durations for a total time of ~1m30s + some buffer. + // Example: 0, .25s, .6s, 1.1s, 1.8s, 2.7s, 4s, 6s, 9s, 12s, 17s, 25s, 35s, 49s, 69s, 97s, 135s + // Jitter is added as a random fraction of the duration multiplied by the jitter factor. + return wait.Backoff{ + Duration: 250 * time.Millisecond, + Factor: 1.4, + Steps: 17, + Jitter: 0.1, + } +} diff --git a/cmd/clusterctl/client/cluster/crd_migration_test.go b/cmd/clusterctl/client/cluster/crd_migration_test.go index 35b7980e93ad..95efc6c31488 100644 --- a/cmd/clusterctl/client/cluster/crd_migration_test.go +++ b/cmd/clusterctl/client/cluster/crd_migration_test.go @@ -52,7 +52,7 @@ func Test_CRDMigrator(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: "foo"}, Spec: apiextensionsv1.CustomResourceDefinitionSpec{ Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ - {Name: "v1alpha1"}, // No storage version as storage is not set.
}, }, Status: apiextensionsv1.CustomResourceDefinitionStatus{StoredVersions: []string{"v1alpha1"}}, @@ -61,7 +61,7 @@ ObjectMeta: metav1.ObjectMeta{Name: "foo"}, Spec: apiextensionsv1.CustomResourceDefinitionSpec{ Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ - {Name: "v1alpha1"}, + {Name: "v1alpha1", Served: true}, }, }, }, @@ -69,12 +69,12 @@ wantIsMigrated: false, }, { - name: "No-op if new CRD supports same versions", + name: "No-op if new CRD uses the same storage version", currentCRD: &apiextensionsv1.CustomResourceDefinition{ ObjectMeta: metav1.ObjectMeta{Name: "foo"}, Spec: apiextensionsv1.CustomResourceDefinitionSpec{ Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ - {Name: "v1alpha1", Storage: true}, + {Name: "v1alpha1", Storage: true, Served: true}, }, }, Status: apiextensionsv1.CustomResourceDefinitionStatus{StoredVersions: []string{"v1alpha1"}}, @@ -83,19 +83,19 @@ ObjectMeta: metav1.ObjectMeta{Name: "foo"}, Spec: apiextensionsv1.CustomResourceDefinitionSpec{ Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ - {Name: "v1alpha1", Storage: true}, + {Name: "v1alpha1", Storage: true, Served: true}, }, }, }, wantIsMigrated: false, }, { - name: "No-op if new CRD adds a new versions", + name: "No-op if new CRD adds a new version and the stored versions contain only the old storage version", currentCRD: &apiextensionsv1.CustomResourceDefinition{ ObjectMeta: metav1.ObjectMeta{Name: "foo"}, Spec: apiextensionsv1.CustomResourceDefinitionSpec{ Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ - {Name: "v1alpha1", Storage: true}, + {Name: "v1alpha1", Storage: true, Served: true}, }, }, Status: apiextensionsv1.CustomResourceDefinitionStatus{StoredVersions: []string{"v1alpha1"}}, @@ -104,8 +104,8 @@ ObjectMeta: metav1.ObjectMeta{Name: "foo"}, Spec: apiextensionsv1.CustomResourceDefinitionSpec{ Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ - {Name: "v1beta1", Storage: true}, // v1beta1 is being added - {Name: "v1alpha1"}, // v1alpha1 still exists + {Name: "v1beta1", Storage: true, Served: false}, // v1beta1 is being added + {Name: "v1alpha1", Served: true}, // v1alpha1 still exists }, }, }, @@ -117,7 +117,7 @@ ObjectMeta: metav1.ObjectMeta{Name: "foo"}, Spec: apiextensionsv1.CustomResourceDefinitionSpec{ Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ - {Name: "v1alpha1", Storage: true}, + {Name: "v1alpha1", Storage: true, Served: true}, }, }, Status: apiextensionsv1.CustomResourceDefinitionStatus{StoredVersions: []string{"v1alpha1"}}, @@ -126,14 +126,14 @@ ObjectMeta: metav1.ObjectMeta{Name: "foo"}, Spec: apiextensionsv1.CustomResourceDefinitionSpec{ Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ - {Name: "v1", Storage: true}, // CRD is jumping to v1, but dropping current storage version without allowing migration.
}, }, }, wantErr: true, }, { - name: "Migrate", + name: "Migrate CRs if storedVersions is not only the current storage version", CRs: []unstructured.Unstructured{ { Object: map[string]interface{}{ @@ -172,8 +172,8 @@ func Test_CRDMigrator(t *testing.T) { Group: "foo", Names: apiextensionsv1.CustomResourceDefinitionNames{Kind: "Foo", ListKind: "FooList"}, Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ - {Name: "v1beta1", Storage: true}, - {Name: "v1alpha1"}, + {Name: "v1beta1", Storage: true, Served: true}, + {Name: "v1alpha1", Served: true}, }, }, Status: apiextensionsv1.CustomResourceDefinitionStatus{StoredVersions: []string{"v1beta1", "v1alpha1"}}, @@ -184,8 +184,8 @@ func Test_CRDMigrator(t *testing.T) { Group: "foo", Names: apiextensionsv1.CustomResourceDefinitionNames{Kind: "Foo", ListKind: "FooList"}, Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ - {Name: "v1", Storage: true}, // v1 is being added - {Name: "v1beta1"}, // v1beta1 still there (required for migration) + {Name: "v1", Storage: true, Served: true}, // v1 is being added + {Name: "v1beta1", Served: true}, // v1beta1 still there // v1alpha1 is being dropped }, }, @@ -203,7 +203,7 @@ func Test_CRDMigrator(t *testing.T) { objs = append(objs, &tt.CRs[i]) } - c, err := test.NewFakeProxy().WithObjs(objs...).NewClient() + c, err := test.NewFakeProxy().WithObjs(objs...).NewClient(context.Background()) g.Expect(err).ToNot(HaveOccurred()) countingClient := newUpgradeCountingClient(c) @@ -211,7 +211,7 @@ func Test_CRDMigrator(t *testing.T) { Client: countingClient, } - isMigrated, err := m.run(ctx, tt.newCRD) + isMigrated, err := m.run(context.Background(), tt.newCRD) if tt.wantErr { g.Expect(err).To(HaveOccurred()) } else { @@ -228,7 +228,7 @@ func Test_CRDMigrator(t *testing.T) { // Check storage versions have been cleaned up. currentCRD := &apiextensionsv1.CustomResourceDefinition{} - err = c.Get(ctx, client.ObjectKeyFromObject(tt.newCRD), currentCRD) + err = c.Get(context.Background(), client.ObjectKeyFromObject(tt.newCRD), currentCRD) g.Expect(err).ToNot(HaveOccurred()) g.Expect(currentCRD.Status.StoredVersions).To(Equal(tt.wantStoredVersions)) } diff --git a/cmd/clusterctl/client/cluster/installer.go b/cmd/clusterctl/client/cluster/installer.go index 34010bc631f0..6e428e64777f 100644 --- a/cmd/clusterctl/client/cluster/installer.go +++ b/cmd/clusterctl/client/cluster/installer.go @@ -52,7 +52,7 @@ type ProviderInstaller interface { Add(repository.Components) // Install performs the installation of the providers ready in the install queue. - Install(InstallOptions) ([]repository.Components, error) + Install(context.Context, InstallOptions) ([]repository.Components, error) // Validate performs steps to validate a management cluster by looking at the current state and the providers in the queue. // The following checks are performed in order to ensure a fully operational cluster: @@ -60,7 +60,7 @@ // - All the providers must support the same API Version of Cluster API (contract) // - All provider CRDs that are referenced in core Cluster API CRDs must comply with the CRD naming scheme, // otherwise a warning is logged. - Validate() error + Validate(context.Context) error // Images returns the list of images required for installing the providers ready in the install queue.
Images() []string @@ -93,36 +93,36 @@ func (i *providerInstaller) Add(components repository.Components) { }) } -func (i *providerInstaller) Install(opts InstallOptions) ([]repository.Components, error) { +func (i *providerInstaller) Install(ctx context.Context, opts InstallOptions) ([]repository.Components, error) { ret := make([]repository.Components, 0, len(i.installQueue)) for _, components := range i.installQueue { - if err := installComponentsAndUpdateInventory(components, i.providerComponents, i.providerInventory); err != nil { + if err := installComponentsAndUpdateInventory(ctx, components, i.providerComponents, i.providerInventory); err != nil { return nil, err } ret = append(ret, components) } - return ret, waitForProvidersReady(opts, i.installQueue, i.proxy) + return ret, waitForProvidersReady(ctx, opts, i.installQueue, i.proxy) } -func installComponentsAndUpdateInventory(components repository.Components, providerComponents ComponentsClient, providerInventory InventoryClient) error { +func installComponentsAndUpdateInventory(ctx context.Context, components repository.Components, providerComponents ComponentsClient, providerInventory InventoryClient) error { log := logf.Log - log.Info("Installing", "Provider", components.ManifestLabel(), "Version", components.Version(), "TargetNamespace", components.TargetNamespace()) + log.Info("Installing", "provider", components.ManifestLabel(), "version", components.Version(), "targetNamespace", components.TargetNamespace()) inventoryObject := components.InventoryObject() - log.V(1).Info("Creating objects", "Provider", components.ManifestLabel(), "Version", components.Version(), "TargetNamespace", components.TargetNamespace()) - if err := providerComponents.Create(components.Objs()); err != nil { + log.V(1).Info("Creating objects", "provider", components.ManifestLabel(), "version", components.Version(), "targetNamespace", components.TargetNamespace()) + if err := providerComponents.Create(ctx, components.Objs()); err != nil { return err } - log.V(1).Info("Creating inventory entry", "Provider", components.ManifestLabel(), "Version", components.Version(), "TargetNamespace", components.TargetNamespace()) - return providerInventory.Create(inventoryObject) + log.V(1).Info("Creating inventory entry", "provider", components.ManifestLabel(), "version", components.Version(), "targetNamespace", components.TargetNamespace()) + return providerInventory.Create(ctx, inventoryObject) } // waitForProvidersReady waits till the installed components are ready. -func waitForProvidersReady(opts InstallOptions, installQueue []repository.Components, proxy Proxy) error { +func waitForProvidersReady(ctx context.Context, opts InstallOptions, installQueue []repository.Components, proxy Proxy) error { // If we dont have to wait for providers to be installed // return early. if !opts.WaitProviders { @@ -132,16 +132,16 @@ func waitForProvidersReady(opts InstallOptions, installQueue []repository.Compon log := logf.Log log.Info("Waiting for providers to be available...") - return waitManagerDeploymentsReady(opts, installQueue, proxy) + return waitManagerDeploymentsReady(ctx, opts, installQueue, proxy) } // waitManagerDeploymentsReady waits till the installed manager deployments are ready. 
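A hedged usage sketch of the context-threaded installer API above. The installer and components values are assumed to come from the surrounding clusterctl packages, and the InstallOptions fields mirror the opts consulted by waitForProvidersReady:

package main

import (
	"context"
	"time"

	"sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster"
	"sigs.k8s.io/cluster-api/cmd/clusterctl/client/repository"
)

// installProvider sketches the call order implied by the interface: queue the
// components, validate the management cluster, then install with a wait.
func installProvider(ctx context.Context, installer cluster.ProviderInstaller, components repository.Components) ([]repository.Components, error) {
	installer.Add(components)
	if err := installer.Validate(ctx); err != nil {
		return nil, err
	}
	// WaitProviders/WaitProviderTimeout are the fields read by
	// waitForProvidersReady above; the timeout value here is illustrative.
	return installer.Install(ctx, cluster.InstallOptions{
		WaitProviders:       true,
		WaitProviderTimeout: 5 * time.Minute,
	})
}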
-func waitManagerDeploymentsReady(opts InstallOptions, installQueue []repository.Components, proxy Proxy) error { +func waitManagerDeploymentsReady(ctx context.Context, opts InstallOptions, installQueue []repository.Components, proxy Proxy) error { for _, components := range installQueue { for _, obj := range components.Objs() { if util.IsDeploymentWithManager(obj) { - if err := waitDeploymentReady(obj, opts.WaitProviderTimeout, proxy); err != nil { - return err + if err := waitDeploymentReady(ctx, obj, opts.WaitProviderTimeout, proxy); err != nil { + return errors.Wrapf(err, "deployment %q is not ready after %s", obj.GetName(), opts.WaitProviderTimeout) } } } @@ -149,9 +149,9 @@ func waitManagerDeploymentsReady(opts InstallOptions, installQueue []repository. return nil } -func waitDeploymentReady(deployment unstructured.Unstructured, timeout time.Duration, proxy Proxy) error { - return wait.Poll(100*time.Millisecond, timeout, func() (bool, error) { - c, err := proxy.NewClient() +func waitDeploymentReady(ctx context.Context, deployment unstructured.Unstructured, timeout time.Duration, proxy Proxy) error { + return wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, timeout, false, func(ctx context.Context) (bool, error) { + c, err := proxy.NewClient(ctx) if err != nil { return false, err } @@ -160,7 +160,7 @@ func waitDeploymentReady(deployment unstructured.Unstructured, timeout time.Dura Name: deployment.GetName(), } dep := &appsv1.Deployment{} - if err := c.Get(context.TODO(), key, dep); err != nil { + if err := c.Get(ctx, key, dep); err != nil { return false, err } for _, c := range dep.Status.Conditions { @@ -172,9 +172,9 @@ func waitDeploymentReady(deployment unstructured.Unstructured, timeout time.Dura }) } -func (i *providerInstaller) Validate() error { +func (i *providerInstaller) Validate(ctx context.Context) error { // Get the list of providers currently in the cluster. - providerList, err := i.providerInventory.List() + providerList, err := i.providerInventory.List(ctx) if err != nil { return err } @@ -198,7 +198,7 @@ func (i *providerInstaller) Validate() error { } coreProvider := coreProviders[0] - managementClusterContract, err := i.getProviderContract(providerInstanceContracts, coreProvider) + managementClusterContract, err := i.getProviderContract(ctx, providerInstanceContracts, coreProvider) if err != nil { return err } @@ -208,7 +208,7 @@ func (i *providerInstaller) Validate() error { provider := components.InventoryObject() // Gets the API Version of Cluster API (contract) the provider support and compare it with the management cluster contract. - providerContract, err := i.getProviderContract(providerInstanceContracts, provider) + providerContract, err := i.getProviderContract(ctx, providerInstanceContracts, provider) if err != nil { return err } @@ -286,7 +286,7 @@ func validateCRDName(obj unstructured.Unstructured, gk *schema.GroupKind) error } // getProviderContract returns the API Version of Cluster API (contract) for a provider instance. -func (i *providerInstaller) getProviderContract(providerInstanceContracts map[string]string, provider clusterctlv1.Provider) (string, error) { +func (i *providerInstaller) getProviderContract(ctx context.Context, providerInstanceContracts map[string]string, provider clusterctlv1.Provider) (string, error) { // If the contract for the provider instance is already known, return it. 
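The wait.Poll to wait.PollUntilContextTimeout switch in waitDeploymentReady above is the apimachinery-recommended replacement for the deprecated poller; cancelling the context now aborts the wait instead of blocking until the timeout. A minimal standalone sketch of the pattern, with a stand-in condition in place of the Deployment readiness check:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx := context.Background()
	start := time.Now()
	// immediate=false matches the call above: the first condition check
	// happens only after one interval has elapsed.
	err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 2*time.Second, false,
		func(ctx context.Context) (bool, error) {
			// Stand-in for "is the provider Deployment Available yet?".
			return time.Since(start) > time.Second, nil
		})
	fmt.Println(err) // <nil> once the condition turns true before the timeout
}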
if contract, ok := providerInstanceContracts[provider.InstanceName()]; ok { return contract, nil @@ -300,12 +300,12 @@ func (i *providerInstaller) getProviderContract(providerInstanceContracts map[st return "", err } - providerRepository, err := i.repositoryClientFactory(configRepository, i.configClient) + providerRepository, err := i.repositoryClientFactory(ctx, configRepository, i.configClient) if err != nil { return "", err } - latestMetadata, err := providerRepository.Metadata(provider.Version).Get() + latestMetadata, err := providerRepository.Metadata(provider.Version).Get(ctx) if err != nil { return "", err } diff --git a/cmd/clusterctl/client/cluster/installer_test.go b/cmd/clusterctl/client/cluster/installer_test.go index 6b34cd2a097a..f2d61368f797 100644 --- a/cmd/clusterctl/client/cluster/installer_test.go +++ b/cmd/clusterctl/client/cluster/installer_test.go @@ -17,6 +17,7 @@ limitations under the License. package cluster import ( + "context" "testing" . "github.com/onsi/gomega" @@ -236,23 +237,25 @@ func Test_providerInstaller_Validate(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - configClient, _ := config.New("", config.InjectReader(fakeReader)) + ctx := context.Background() + + configClient, _ := config.New(ctx, "", config.InjectReader(fakeReader)) i := &providerInstaller{ configClient: configClient, proxy: tt.fields.proxy, providerInventory: newInventoryClient(tt.fields.proxy, nil), - repositoryClientFactory: func(provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) { - return repository.New(provider, configClient, repository.InjectRepository(repositoryMap[provider.ManifestLabel()])) + repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, _ ...repository.Option) (repository.Client, error) { + return repository.New(ctx, provider, configClient, repository.InjectRepository(repositoryMap[provider.ManifestLabel()])) }, installQueue: tt.fields.installQueue, } - err := i.Validate() + err := i.Validate(ctx) if tt.wantErr { g.Expect(err).To(HaveOccurred()) } else { - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) } }) } @@ -295,7 +298,7 @@ func Test_providerInstaller_ValidateCRDName(t *testing.T) { if tt.wantErr { g.Expect(err).To(HaveOccurred()) } else { - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) } }) } diff --git a/cmd/clusterctl/client/cluster/internal/dryrun/client.go b/cmd/clusterctl/client/cluster/internal/dryrun/client.go index aa08f8e6d53b..f2ee99c4c869 100644 --- a/cmd/clusterctl/client/cluster/internal/dryrun/client.go +++ b/cmd/clusterctl/client/cluster/internal/dryrun/client.go @@ -30,6 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/client/fake" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/cmd/clusterctl/internal/scheme" ) @@ -114,7 +115,7 @@ type ChangeSummary struct { // when the objects are not found in the internal object tracker. Typically the apiReader passed would be a reader client // to a real Kubernetes Cluster. 
func NewClient(apiReader client.Reader, objs []client.Object) *Client { - fakeClient := fake.NewClientBuilder().WithObjects(objs...).WithScheme(localScheme).Build() + fakeClient := fake.NewClientBuilder().WithObjects(objs...).WithStatusSubresource(&clusterv1.ClusterClass{}, &clusterv1.Cluster{}).WithScheme(localScheme).Build() return &Client{ fakeClient: fakeClient, apiReader: apiReader, @@ -308,6 +309,16 @@ func (c *Client) SubResource(subResource string) client.SubResourceClient { return c.fakeClient.SubResource(subResource) } +// GroupVersionKindFor returns the GroupVersionKind for the given object. +func (c *Client) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return c.fakeClient.GroupVersionKindFor(obj) +} + +// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. +func (c *Client) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return c.fakeClient.IsObjectNamespaced(obj) +} + // Changes generates a summary of all the changes observed from the creation of the dry run client // to when this function is called. func (c *Client) Changes(ctx context.Context) (*ChangeSummary, error) { diff --git a/cmd/clusterctl/client/cluster/inventory.go b/cmd/clusterctl/client/cluster/inventory.go index c71529c3ef4c..a318a2f273f3 100644 --- a/cmd/clusterctl/client/cluster/inventory.go +++ b/cmd/clusterctl/client/cluster/inventory.go @@ -17,6 +17,7 @@ limitations under the License. package cluster import ( + "context" "fmt" "time" @@ -93,34 +94,34 @@ type InventoryClient interface { // EnsureCustomResourceDefinitions installs the CRD required for creating inventory items, if necessary. // Nb. In order to provide a simpler out-of-the box experience, the inventory CRD // is embedded in the clusterctl binary. - EnsureCustomResourceDefinitions() error + EnsureCustomResourceDefinitions(ctx context.Context) error // Create an inventory item for a provider instance installed in the cluster. - Create(clusterctlv1.Provider) error + Create(context.Context, clusterctlv1.Provider) error // List returns the inventory items for all the provider instances installed in the cluster. - List() (*clusterctlv1.ProviderList, error) + List(ctx context.Context) (*clusterctlv1.ProviderList, error) // GetDefaultProviderName returns the default provider for a given ProviderType. // In case there is only a single provider for a given type, e.g. only the AWS infrastructure Provider, it returns // this as the default provider; In case there are more provider of the same type, there is no default provider. - GetDefaultProviderName(providerType clusterctlv1.ProviderType) (string, error) + GetDefaultProviderName(ctx context.Context, providerType clusterctlv1.ProviderType) (string, error) // GetProviderVersion returns the version for a given provider. - GetProviderVersion(provider string, providerType clusterctlv1.ProviderType) (string, error) + GetProviderVersion(ctx context.Context, provider string, providerType clusterctlv1.ProviderType) (string, error) // GetProviderNamespace returns the namespace for a given provider. - GetProviderNamespace(provider string, providerType clusterctlv1.ProviderType) (string, error) + GetProviderNamespace(ctx context.Context, provider string, providerType clusterctlv1.ProviderType) (string, error) // CheckCAPIContract checks the Cluster API version installed in the management cluster, and fails if this version // does not match the current one supported by clusterctl. 
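The WithStatusSubresource call added to the dry-run client above is required because, from controller-runtime v0.15 on, the fake client rejects Status().Update()/Patch() for types not registered as having a status subresource. A hedged sketch of that behavior; the scheme wiring is assumed, not taken from this diff:

package main

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/cmd/clusterctl/internal/scheme"
)

func main() {
	cluster := &clusterv1.Cluster{}
	cluster.Name = "demo"
	cluster.Namespace = "default"

	// Without WithStatusSubresource(&clusterv1.Cluster{}), the Status().Update
	// below would fail on newer fake clients. scheme.Scheme is assumed to be
	// the clusterctl scheme the dry-run client builds localScheme from.
	c := fake.NewClientBuilder().
		WithScheme(scheme.Scheme).
		WithObjects(cluster).
		WithStatusSubresource(&clusterv1.Cluster{}).
		Build()

	cluster.Status.Phase = string(clusterv1.ClusterPhaseProvisioned)
	_ = c.Status().Update(context.Background(), cluster)
}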
- CheckCAPIContract(...CheckCAPIContractOption) error + CheckCAPIContract(context.Context, ...CheckCAPIContractOption) error // CheckCAPIInstalled checks if Cluster API is installed on the management cluster. - CheckCAPIInstalled() (bool, error) + CheckCAPIInstalled(ctx context.Context) (bool, error) // CheckSingleProviderInstance ensures that only one instance of a provider is running, returns error otherwise. - CheckSingleProviderInstance() error + CheckSingleProviderInstance(ctx context.Context) error } // inventoryClient implements InventoryClient. @@ -140,7 +141,7 @@ func newInventoryClient(proxy Proxy, pollImmediateWaiter PollImmediateWaiter) *i } } -func (p *inventoryClient) EnsureCustomResourceDefinitions() error { +func (p *inventoryClient) EnsureCustomResourceDefinitions(ctx context.Context) error { log := logf.Log if err := p.proxy.ValidateKubernetesVersion(); err != nil { @@ -152,7 +153,7 @@ func (p *inventoryClient) EnsureCustomResourceDefinitions() error { // NB. NewClient has an internal retry loop that should mitigate temporary connection glitch; here we are // trying to detect persistent connection problems (>10s) before entering in longer retry loops while executing // clusterctl operations. - _, err := p.proxy.NewClient() + _, err := p.proxy.NewClient(ctx) if err != nil { return err } @@ -161,9 +162,9 @@ func (p *inventoryClient) EnsureCustomResourceDefinitions() error { // Nb. The operation is wrapped in a retry loop to make EnsureCustomResourceDefinitions more resilient to unexpected conditions. var crdIsIstalled bool listInventoryBackoff := newReadBackoff() - if err := retryWithExponentialBackoff(listInventoryBackoff, func() error { + if err := retryWithExponentialBackoff(ctx, listInventoryBackoff, func(ctx context.Context) error { var err error - crdIsIstalled, err = checkInventoryCRDs(p.proxy) + crdIsIstalled, err = checkInventoryCRDs(ctx, p.proxy) return err }); err != nil { return err @@ -188,8 +189,8 @@ func (p *inventoryClient) EnsureCustomResourceDefinitions() error { // Create the Kubernetes object. // Nb. The operation is wrapped in a retry loop to make EnsureCustomResourceDefinitions more resilient to unexpected conditions. - if err := retryWithExponentialBackoff(createInventoryObjectBackoff, func() error { - return p.createObj(o) + if err := retryWithExponentialBackoff(ctx, createInventoryObjectBackoff, func(ctx context.Context) error { + return p.createObj(ctx, o) }); err != nil { return err } @@ -197,8 +198,8 @@ func (p *inventoryClient) EnsureCustomResourceDefinitions() error { // If the object is a CRDs, waits for it being Established. if apiextensionsv1.SchemeGroupVersion.WithKind("CustomResourceDefinition").GroupKind() == o.GroupVersionKind().GroupKind() { crdKey := client.ObjectKeyFromObject(&o) - if err := p.pollImmediateWaiter(waitInventoryCRDInterval, waitInventoryCRDTimeout, func() (bool, error) { - c, err := p.proxy.NewClient() + if err := p.pollImmediateWaiter(ctx, waitInventoryCRDInterval, waitInventoryCRDTimeout, func(ctx context.Context) (bool, error) { + c, err := p.proxy.NewClient(ctx) if err != nil { return false, err } @@ -224,8 +225,8 @@ func (p *inventoryClient) EnsureCustomResourceDefinitions() error { } // checkInventoryCRDs checks if the inventory CRDs are installed in the cluster. 
-func checkInventoryCRDs(proxy Proxy) (bool, error) { - c, err := proxy.NewClient() +func checkInventoryCRDs(ctx context.Context, proxy Proxy) (bool, error) { + c, err := proxy.NewClient(ctx) if err != nil { return false, err } @@ -246,8 +247,8 @@ func checkInventoryCRDs(proxy Proxy) (bool, error) { return true, errors.Errorf("clusterctl inventory CRD does not defines the %s version", clusterctlv1.GroupVersion.Version) } -func (p *inventoryClient) createObj(o unstructured.Unstructured) error { - c, err := p.proxy.NewClient() +func (p *inventoryClient) createObj(ctx context.Context, o unstructured.Unstructured) error { + c, err := p.proxy.NewClient(ctx) if err != nil { return err } @@ -268,11 +269,11 @@ func (p *inventoryClient) createObj(o unstructured.Unstructured) error { return nil } -func (p *inventoryClient) Create(m clusterctlv1.Provider) error { +func (p *inventoryClient) Create(ctx context.Context, m clusterctlv1.Provider) error { // Create the Kubernetes object. createInventoryObjectBackoff := newWriteBackoff() - return retryWithExponentialBackoff(createInventoryObjectBackoff, func() error { - cl, err := p.proxy.NewClient() + return retryWithExponentialBackoff(ctx, createInventoryObjectBackoff, func(ctx context.Context) error { + cl, err := p.proxy.NewClient(ctx) if err != nil { return err } @@ -305,12 +306,12 @@ func (p *inventoryClient) Create(m clusterctlv1.Provider) error { }) } -func (p *inventoryClient) List() (*clusterctlv1.ProviderList, error) { +func (p *inventoryClient) List(ctx context.Context) (*clusterctlv1.ProviderList, error) { providerList := &clusterctlv1.ProviderList{} listProvidersBackoff := newReadBackoff() - if err := retryWithExponentialBackoff(listProvidersBackoff, func() error { - return listProviders(p.proxy, providerList) + if err := retryWithExponentialBackoff(ctx, listProvidersBackoff, func(ctx context.Context) error { + return listProviders(ctx, p.proxy, providerList) }); err != nil { return nil, err } @@ -319,8 +320,8 @@ func (p *inventoryClient) List() (*clusterctlv1.ProviderList, error) { } // listProviders retrieves the list of provider inventory objects. 
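retryWithExponentialBackoff itself is defined elsewhere in this package; given the new call shape visible throughout these hunks (context first, operation receiving a context), a plausible implementation sketch, assuming it wraps apimachinery's ExponentialBackoffWithContext rather than copying the real code:

package cluster

import (
	"context"

	"k8s.io/apimachinery/pkg/util/wait"
)

// retrySketch mirrors the assumed shape of retryWithExponentialBackoff: the
// operation receives the (possibly deadline-carrying) context, and every
// error is treated as retryable until the backoff steps are exhausted.
func retrySketch(ctx context.Context, backoff wait.Backoff, operation func(ctx context.Context) error) error {
	var lastErr error
	err := wait.ExponentialBackoffWithContext(ctx, backoff, func(ctx context.Context) (bool, error) {
		if err := operation(ctx); err != nil {
			lastErr = err
			return false, nil // retry on the next backoff step
		}
		return true, nil
	})
	if err != nil && lastErr != nil {
		return lastErr // surface the operation error, not the timeout
	}
	return err
}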
-func listProviders(proxy Proxy, providerList *clusterctlv1.ProviderList) error { - cl, err := proxy.NewClient() +func listProviders(ctx context.Context, proxy Proxy, providerList *clusterctlv1.ProviderList) error { + cl, err := proxy.NewClient(ctx) if err != nil { return err } @@ -331,8 +332,8 @@ func listProviders(proxy Proxy, providerList *clusterctlv1.ProviderList) error { return nil } -func (p *inventoryClient) GetDefaultProviderName(providerType clusterctlv1.ProviderType) (string, error) { - providerList, err := p.List() +func (p *inventoryClient) GetDefaultProviderName(ctx context.Context, providerType clusterctlv1.ProviderType) (string, error) { + providerList, err := p.List(ctx) if err != nil { return "", err } @@ -352,8 +353,8 @@ func (p *inventoryClient) GetDefaultProviderName(providerType clusterctlv1.Provi return "", nil } -func (p *inventoryClient) GetProviderVersion(provider string, providerType clusterctlv1.ProviderType) (string, error) { - providerList, err := p.List() +func (p *inventoryClient) GetProviderVersion(ctx context.Context, provider string, providerType clusterctlv1.ProviderType) (string, error) { + providerList, err := p.List(ctx) if err != nil { return "", err } @@ -372,8 +373,8 @@ func (p *inventoryClient) GetProviderVersion(provider string, providerType clust return "", nil } -func (p *inventoryClient) GetProviderNamespace(provider string, providerType clusterctlv1.ProviderType) (string, error) { - providerList, err := p.List() +func (p *inventoryClient) GetProviderNamespace(ctx context.Context, provider string, providerType clusterctlv1.ProviderType) (string, error) { + providerList, err := p.List(ctx) if err != nil { return "", err } @@ -392,13 +393,13 @@ func (p *inventoryClient) GetProviderNamespace(provider string, providerType clu return "", nil } -func (p *inventoryClient) CheckCAPIContract(options ...CheckCAPIContractOption) error { +func (p *inventoryClient) CheckCAPIContract(ctx context.Context, options ...CheckCAPIContractOption) error { opt := &CheckCAPIContractOptions{} for _, o := range options { o.Apply(opt) } - c, err := p.proxy.NewClient() + c, err := p.proxy.NewClient(ctx) if err != nil { return err } @@ -431,8 +432,8 @@ func (p *inventoryClient) CheckCAPIContract(options ...CheckCAPIContractOption) return errors.Errorf("failed to check Cluster API version") } -func (p *inventoryClient) CheckCAPIInstalled() (bool, error) { - if err := p.CheckCAPIContract(AllowCAPIAnyContract{}); err != nil { +func (p *inventoryClient) CheckCAPIInstalled(ctx context.Context) (bool, error) { + if err := p.CheckCAPIContract(ctx, AllowCAPIAnyContract{}); err != nil { if apierrors.IsNotFound(err) { // The expected CRDs are not installed on the management. This would mean that Cluster API is not installed on the cluster. return false, nil @@ -442,8 +443,8 @@ func (p *inventoryClient) CheckCAPIInstalled() (bool, error) { return true, nil } -func (p *inventoryClient) CheckSingleProviderInstance() error { - providers, err := p.List() +func (p *inventoryClient) CheckSingleProviderInstance(ctx context.Context) error { + providers, err := p.List(ctx) if err != nil { return err } diff --git a/cmd/clusterctl/client/cluster/inventory_test.go b/cmd/clusterctl/client/cluster/inventory_test.go index 92916358c2ba..9eaa6f7e29d1 100644 --- a/cmd/clusterctl/client/cluster/inventory_test.go +++ b/cmd/clusterctl/client/cluster/inventory_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package cluster import ( + "context" "testing" "time" @@ -26,12 +27,11 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/controller-runtime/pkg/client" - clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" "sigs.k8s.io/cluster-api/cmd/clusterctl/internal/test" ) -func fakePollImmediateWaiter(_, _ time.Duration, _ wait.ConditionFunc) error { +func fakePollImmediateWaiter(_ context.Context, _, _ time.Duration, _ wait.ConditionWithContextFunc) error { return nil } @@ -66,25 +66,28 @@ func Test_inventoryClient_CheckInventoryCRDs(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + proxy := test.NewFakeProxy() p := newInventoryClient(proxy, fakePollImmediateWaiter) if tt.fields.alreadyHasCRD { // forcing creation of metadata before test - g.Expect(p.EnsureCustomResourceDefinitions()).To(Succeed()) + g.Expect(p.EnsureCustomResourceDefinitions(ctx)).To(Succeed()) } - res, err := checkInventoryCRDs(proxy) + res, err := checkInventoryCRDs(ctx, proxy) g.Expect(res).To(Equal(tt.want)) if tt.wantErr { g.Expect(err).To(HaveOccurred()) } else { - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) } }) } } var fooProvider = clusterctlv1.Provider{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "ns1", ResourceVersion: "999"}} +var v1alpha4Contract = "v1alpha4" func Test_inventoryClient_List(t *testing.T) { type fields struct { @@ -114,13 +117,13 @@ func Test_inventoryClient_List(t *testing.T) { g := NewWithT(t) p := newInventoryClient(test.NewFakeProxy().WithObjs(tt.fields.initObjs...), fakePollImmediateWaiter) - got, err := p.List() + got, err := p.List(context.Background()) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got.Items).To(ConsistOf(tt.want)) }) } @@ -176,24 +179,26 @@ func Test_inventoryClient_Create(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + p := &inventoryClient{ proxy: tt.fields.proxy, } - err := p.Create(tt.args.m) + err := p.Create(ctx, tt.args.m) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) - got, err := p.List() + got, err := p.List(ctx) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) for i := range got.Items { tt.wantProviders[i].ResourceVersion = got.Items[i].ResourceVersion @@ -278,26 +283,6 @@ func Test_CheckCAPIContract(t *testing.T) { args: args{}, wantErr: true, }, - { - name: "Pass when Cluster API with v1alpha3 contract is installed, but this is explicitly tolerated", - fields: fields{ - proxy: test.NewFakeProxy().WithObjs(&apiextensionsv1.CustomResourceDefinition{ - ObjectMeta: metav1.ObjectMeta{Name: "clusters.cluster.x-k8s.io"}, - Spec: apiextensionsv1.CustomResourceDefinitionSpec{ - Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ - { - Name: clusterv1alpha3.GroupVersion.Version, - Storage: true, - }, - }, - }, - }), - }, - args: args{ - options: []CheckCAPIContractOption{AllowCAPIContract{Contract: clusterv1alpha3.GroupVersion.Version}, AllowCAPIContract{Contract: test.PreviousCAPIContractNotSupported}}, - }, - wantErr: false, - }, { name: "Pass when Cluster API with previous contract is installed, but this is explicitly tolerated", fields: fields{ @@ -317,7 +302,7 @@ func 
Test_CheckCAPIContract(t *testing.T) { }), }, args: args{ - options: []CheckCAPIContractOption{AllowCAPIContract{Contract: clusterv1alpha3.GroupVersion.Version}, AllowCAPIContract{Contract: test.PreviousCAPIContractNotSupported}}, + options: []CheckCAPIContractOption{AllowCAPIContract{Contract: v1alpha4Contract}, AllowCAPIContract{Contract: test.PreviousCAPIContractNotSupported}}, }, wantErr: false, }, @@ -350,12 +335,12 @@ func Test_CheckCAPIContract(t *testing.T) { p := &inventoryClient{ proxy: tt.fields.proxy, } - err := p.CheckCAPIContract(tt.args.options...) + err := p.CheckCAPIContract(context.Background(), tt.args.options...) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) }) } } @@ -397,13 +382,13 @@ func Test_inventoryClient_CheckSingleProviderInstance(t *testing.T) { g := NewWithT(t) p := newInventoryClient(test.NewFakeProxy().WithObjs(tt.fields.initObjs...), fakePollImmediateWaiter) - err := p.CheckSingleProviderInstance() + err := p.CheckSingleProviderInstance(context.Background()) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) }) } } diff --git a/cmd/clusterctl/client/cluster/mover.go b/cmd/clusterctl/client/cluster/mover.go index 6d88b589197d..f0b6178d64f0 100644 --- a/cmd/clusterctl/client/cluster/mover.go +++ b/cmd/clusterctl/client/cluster/mover.go @@ -21,36 +21,43 @@ import ( "fmt" "os" "path/filepath" + "time" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/version" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/yaml" ) +// ResourceMutatorFunc holds the type for mutators to be applied on resources during a move operation. +type ResourceMutatorFunc func(u *unstructured.Unstructured) error + // ObjectMover defines methods for moving Cluster API objects to another management cluster. type ObjectMover interface { // Move moves all the Cluster API objects existing in a namespace (or from all the namespaces if empty) to a target management cluster. - Move(namespace string, toCluster Client, dryRun bool) error + Move(ctx context.Context, namespace string, toCluster Client, dryRun bool, mutators ...ResourceMutatorFunc) error // ToDirectory writes all the Cluster API objects existing in a namespace (or from all the namespaces if empty) to a target directory. - ToDirectory(namespace string, directory string) error + ToDirectory(ctx context.Context, namespace string, directory string) error // FromDirectory reads all the Cluster API objects existing in a configured directory to a target management cluster. - FromDirectory(toCluster Client, directory string) error + FromDirectory(ctx context.Context, toCluster Client, directory string) error } // objectMover implements the ObjectMover interface. 
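A hedged example of the new ResourceMutatorFunc hook introduced above: a mutator that relocates every namespaced object to a fixed target namespace while it streams through Move. The "target-ns" value and the commented wiring are placeholders:

package main

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// moveToNamespace returns a mutator that rewrites the namespace of every
// namespaced object in the move. Cluster-scoped objects (empty namespace)
// are left untouched.
func moveToNamespace(target string) func(u *unstructured.Unstructured) error {
	return func(u *unstructured.Unstructured) error {
		if u.GetNamespace() != "" {
			u.SetNamespace(target)
		}
		return nil
	}
}

// Assumed wiring: mover.Move(ctx, "", toCluster, false, moveToNamespace("target-ns"))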
@@ -63,7 +70,7 @@ type objectMover struct { // ensure objectMover implements the ObjectMover interface. var _ ObjectMover = &objectMover{} -func (o *objectMover) Move(namespace string, toCluster Client, dryRun bool) error { +func (o *objectMover) Move(ctx context.Context, namespace string, toCluster Client, dryRun bool, mutators ...ResourceMutatorFunc) error { log := logf.Log log.Info("Performing move...") o.dryRun = dryRun @@ -75,12 +82,12 @@ func (o *objectMover) Move(namespace string, toCluster Client, dryRun bool) erro // checks that all the required providers in place in the target cluster. if !o.dryRun { - if err := o.checkTargetProviders(toCluster.ProviderInventory()); err != nil { + if err := o.checkTargetProviders(ctx, toCluster.ProviderInventory()); err != nil { return errors.Wrap(err, "failed to check providers in target cluster") } } - objectGraph, err := o.getObjectGraph(namespace) + objectGraph, err := o.getObjectGraph(ctx, namespace) if err != nil { return errors.Wrap(err, "failed to get object graph") } @@ -91,22 +98,22 @@ func (o *objectMover) Move(namespace string, toCluster Client, dryRun bool) erro proxy = toCluster.Proxy() } - return o.move(objectGraph, proxy) + return o.move(ctx, objectGraph, proxy, mutators...) } -func (o *objectMover) ToDirectory(namespace string, directory string) error { +func (o *objectMover) ToDirectory(ctx context.Context, namespace string, directory string) error { log := logf.Log log.Info("Moving to directory...") - objectGraph, err := o.getObjectGraph(namespace) + objectGraph, err := o.getObjectGraph(ctx, namespace) if err != nil { return errors.Wrap(err, "failed to get object graph") } - return o.toDirectory(objectGraph, directory) + return o.toDirectory(ctx, objectGraph, directory) } -func (o *objectMover) FromDirectory(toCluster Client, directory string) error { +func (o *objectMover) FromDirectory(ctx context.Context, toCluster Client, directory string) error { log := logf.Log log.Info("Moving from directory...") @@ -114,7 +121,7 @@ func (o *objectMover) FromDirectory(toCluster Client, directory string) error { objectGraph := newObjectGraph(o.fromProxy, o.fromProviderInventory) // Gets all the types defined by the CRDs installed by clusterctl plus the ConfigMap/Secret core types. - err := objectGraph.getDiscoveryTypes() + err := objectGraph.getDiscoveryTypes(ctx) if err != nil { return errors.Wrap(err, "failed to retrieve discovery types") } @@ -143,7 +150,7 @@ func (o *objectMover) FromDirectory(toCluster Client, directory string) error { // Restore the objects to the target cluster. proxy := toCluster.Proxy() - return o.fromDirectory(objectGraph, proxy) + return o.fromDirectory(ctx, objectGraph, proxy) } func (o *objectMover) filesToObjs(dir string) ([]unstructured.Unstructured, error) { @@ -177,11 +184,11 @@ func (o *objectMover) filesToObjs(dir string) ([]unstructured.Unstructured, erro return objs, nil } -func (o *objectMover) getObjectGraph(namespace string) (*objectGraph, error) { +func (o *objectMover) getObjectGraph(ctx context.Context, namespace string) (*objectGraph, error) { objectGraph := newObjectGraph(o.fromProxy, o.fromProviderInventory) // Gets all the types defined by the CRDs installed by clusterctl plus the ConfigMap/Secret core types. 
- err := objectGraph.getDiscoveryTypes() + err := objectGraph.getDiscoveryTypes(ctx) if err != nil { return nil, errors.Wrap(err, "failed to retrieve discovery types") } @@ -189,7 +196,7 @@ func (o *objectMover) getObjectGraph(namespace string) (*objectGraph, error) { // Discovery the object graph for the selected types: // - Nodes are defined the Kubernetes objects (Clusters, Machines etc.) identified during the discovery process. // - Edges are derived by the OwnerReferences between nodes. - if err := objectGraph.Discovery(namespace); err != nil { + if err := objectGraph.Discovery(ctx, namespace); err != nil { return nil, errors.Wrap(err, "failed to discover the object graph") } @@ -197,7 +204,7 @@ func (o *objectMover) getObjectGraph(namespace string) (*objectGraph, error) { // This is required because if the infrastructure is provisioned, then we can reasonably assume that the objects we are moving/backing up are // not currently waiting for long-running reconciliation loops, and so we can safely rely on the pause field on the Cluster object // for blocking any further object reconciliation on the source objects. - if err := o.checkProvisioningCompleted(objectGraph); err != nil { + if err := o.checkProvisioningCompleted(ctx, objectGraph); err != nil { return nil, errors.Wrap(err, "failed to check for provisioned infrastructure") } @@ -215,7 +222,7 @@ func newObjectMover(fromProxy Proxy, fromProviderInventory InventoryClient) *obj } // checkProvisioningCompleted checks if Cluster API has already completed the provisioning of the infrastructure for the objects involved in the move operation. -func (o *objectMover) checkProvisioningCompleted(graph *objectGraph) error { +func (o *objectMover) checkProvisioningCompleted(ctx context.Context, graph *objectGraph) error { if o.dryRun { return nil } @@ -227,8 +234,8 @@ func (o *objectMover) checkProvisioningCompleted(graph *objectGraph) error { for i := range clusters { cluster := clusters[i] clusterObj := &clusterv1.Cluster{} - if err := retryWithExponentialBackoff(readClusterBackoff, func() error { - return getClusterObj(o.fromProxy, cluster, clusterObj) + if err := retryWithExponentialBackoff(ctx, readClusterBackoff, func(ctx context.Context) error { + return getClusterObj(ctx, o.fromProxy, cluster, clusterObj) }); err != nil { return err } @@ -257,8 +264,8 @@ func (o *objectMover) checkProvisioningCompleted(graph *objectGraph) error { for i := range machines { machine := machines[i] machineObj := &clusterv1.Machine{} - if err := retryWithExponentialBackoff(readMachinesBackoff, func() error { - return getMachineObj(o.fromProxy, machine, machineObj) + if err := retryWithExponentialBackoff(ctx, readMachinesBackoff, func(ctx context.Context) error { + return getMachineObj(ctx, o.fromProxy, machine, machineObj) }); err != nil { return err } @@ -272,8 +279,8 @@ func (o *objectMover) checkProvisioningCompleted(graph *objectGraph) error { } // getClusterObj retrieves the clusterObj corresponding to a node with type Cluster. 
-func getClusterObj(proxy Proxy, cluster *node, clusterObj *clusterv1.Cluster) error { - c, err := proxy.NewClient() +func getClusterObj(ctx context.Context, proxy Proxy, cluster *node, clusterObj *clusterv1.Cluster) error { + c, err := proxy.NewClient(ctx) if err != nil { return err } @@ -284,14 +291,14 @@ func getClusterObj(proxy Proxy, cluster *node, clusterObj *clusterv1.Cluster) er if err := c.Get(ctx, clusterObjKey, clusterObj); err != nil { return errors.Wrapf(err, "error reading Cluster %s/%s", - clusterObj.GetNamespace(), clusterObj.GetName()) + cluster.identity.Namespace, cluster.identity.Name) } return nil } // getMachineObj retrieves the machineObj corresponding to a node with type Machine. -func getMachineObj(proxy Proxy, machine *node, machineObj *clusterv1.Machine) error { - c, err := proxy.NewClient() +func getMachineObj(ctx context.Context, proxy Proxy, machine *node, machineObj *clusterv1.Machine) error { + c, err := proxy.NewClient(ctx) if err != nil { return err } @@ -308,7 +315,7 @@ func getMachineObj(proxy Proxy, machine *node, machineObj *clusterv1.Machine) er } // Move moves all the Cluster API objects existing in a namespace (or from all the namespaces if empty) to a target management cluster. -func (o *objectMover) move(graph *objectGraph, toProxy Proxy) error { +func (o *objectMover) move(ctx context.Context, graph *objectGraph, toProxy Proxy, mutators ...ResourceMutatorFunc) error { log := logf.Log clusters := graph.getClusters() @@ -319,20 +326,31 @@ func (o *objectMover) move(graph *objectGraph, toProxy Proxy) error { // Sets the pause field on the Cluster object in the source management cluster, so the controllers stop reconciling it. log.V(1).Info("Pausing the source cluster") - if err := setClusterPause(o.fromProxy, clusters, true, o.dryRun); err != nil { + if err := setClusterPause(ctx, o.fromProxy, clusters, true, o.dryRun); err != nil { return err } log.V(1).Info("Pausing the source ClusterClasses") - if err := setClusterClassPause(o.fromProxy, clusterClasses, true, o.dryRun); err != nil { + if err := setClusterClassPause(ctx, o.fromProxy, clusterClasses, true, o.dryRun); err != nil { return errors.Wrap(err, "error pausing ClusterClasses") } - // Ensure all the expected target namespaces are in place before creating objects. - log.V(1).Info("Creating target namespaces, if missing") - if err := o.ensureNamespaces(graph, toProxy); err != nil { - return err + log.Info("Waiting for all resources to be ready to move") + // exponential backoff configuration which returns durations for a total time of ~2m. + // Example: 0, 5s, 8s, 11s, 17s, 26s, 38s, 57s, 86s, 128s + waitForMoveUnblockedBackoff := wait.Backoff{ + Duration: 5 * time.Second, + Factor: 1.5, + Steps: 10, + Jitter: 0.1, } + if err := waitReadyForMove(ctx, o.fromProxy, graph.getMoveNodes(), o.dryRun, waitForMoveUnblockedBackoff); err != nil { + return errors.Wrap(err, "error waiting for resources to be ready to move") + } + + // Nb. DO NOT call ensureNamespaces at this point because: + // - namespace will be ensured to exist before creating the resource. + // - If it's done here, we might create a namespace that can end up unused on the target cluster (due to mutators). // Define the move sequence by processing the ownerReference chain, so we ensure that a Kubernetes object is moved only after its owners. // The sequence is based on object graph nodes, each one representing a Kubernetes object; nodes are grouped, so bulk of nodes can be moved in parallel. e.g.
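waitReadyForMove, called above and shown in a later hunk, polls every blocking node until the clusterctlv1.BlockMoveAnnotation disappears. A hedged sketch of the provider-side contract that implies: a controller holds the annotation while it quiesces and removes it once the object is safe to move. Only the annotation key is taken from this diff; the helper names are illustrative:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
)

// blockMove marks obj as not ready to be moved; clusterctl keeps retrying
// (with the ~2m backoff above) until unblockMove runs. waitReadyForMove only
// checks for the key's existence, so the value is arbitrary.
func blockMove(obj metav1.Object) {
	a := obj.GetAnnotations()
	if a == nil {
		a = map[string]string{}
	}
	a[clusterctlv1.BlockMoveAnnotation] = "true"
	obj.SetAnnotations(a)
}

func unblockMove(obj metav1.Object) {
	a := obj.GetAnnotations()
	delete(a, clusterctlv1.BlockMoveAnnotation)
	obj.SetAnnotations(a)
}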
@@ -344,31 +362,35 @@ func (o *objectMover) move(graph *objectGraph, toProxy Proxy) error { // Create all objects group by group, ensuring all the ownerReferences are re-created. log.Info("Creating objects in the target cluster") for groupIndex := 0; groupIndex < len(moveSequence.groups); groupIndex++ { - if err := o.createGroup(moveSequence.getGroup(groupIndex), toProxy); err != nil { + if err := o.createGroup(ctx, moveSequence.getGroup(groupIndex), toProxy, mutators...); err != nil { return err } } + // Nb. Mutators used after this point (after creating the resources on the target cluster) are mainly intended for + // using the right namespace to fetch the resource from the target cluster. + // Mutators affecting non-metadata fields are a no-op after this point. + // Delete all objects group by group in reverse order. log.Info("Deleting objects from the source cluster") for groupIndex := len(moveSequence.groups) - 1; groupIndex >= 0; groupIndex-- { - if err := o.deleteGroup(moveSequence.getGroup(groupIndex)); err != nil { + if err := o.deleteGroup(ctx, moveSequence.getGroup(groupIndex)); err != nil { return err } } // Resume the ClusterClasses in the target management cluster, so the controllers start reconciling it. log.V(1).Info("Resuming the target ClusterClasses") - if err := setClusterClassPause(toProxy, clusterClasses, false, o.dryRun); err != nil { + if err := setClusterClassPause(ctx, toProxy, clusterClasses, false, o.dryRun, mutators...); err != nil { return errors.Wrap(err, "error resuming ClusterClasses") } // Reset the pause field on the Cluster object in the target management cluster, so the controllers start reconciling it. log.V(1).Info("Resuming the target cluster") - return setClusterPause(toProxy, clusters, false, o.dryRun) + return setClusterPause(ctx, toProxy, clusters, false, o.dryRun, mutators...) } -func (o *objectMover) toDirectory(graph *objectGraph, directory string) error { +func (o *objectMover) toDirectory(ctx context.Context, graph *objectGraph, directory string) error { log := logf.Log clusters := graph.getClusters() @@ -379,12 +401,12 @@ func (o *objectMover) toDirectory(graph *objectGraph, directory string) error { // Sets the pause field on the Cluster object in the source management cluster, so the controllers stop reconciling it. log.V(1).Info("Pausing the source cluster") - if err := setClusterPause(o.fromProxy, clusters, true, o.dryRun); err != nil { + if err := setClusterPause(ctx, o.fromProxy, clusters, true, o.dryRun); err != nil { return err } log.V(1).Info("Pausing the source ClusterClasses") - if err := setClusterClassPause(o.fromProxy, clusterClasses, true, o.dryRun); err != nil { + if err := setClusterClassPause(ctx, o.fromProxy, clusterClasses, true, o.dryRun); err != nil { return errors.Wrap(err, "error pausing ClusterClasses") } @@ -398,23 +420,23 @@ func (o *objectMover) toDirectory(graph *objectGraph, directory string) error { // Save all objects group by group log.Info(fmt.Sprintf("Saving files to %s", directory)) for groupIndex := 0; groupIndex < len(moveSequence.groups); groupIndex++ { - if err := o.backupGroup(moveSequence.getGroup(groupIndex), directory); err != nil { + if err := o.backupGroup(ctx, moveSequence.getGroup(groupIndex), directory); err != nil { return err } } // Resume the ClusterClasses in the target management cluster, so the controllers start reconciling it.
log.V(1).Info("Resuming the target ClusterClasses") - if err := setClusterClassPause(o.fromProxy, clusterClasses, false, o.dryRun); err != nil { + if err := setClusterClassPause(ctx, o.fromProxy, clusterClasses, false, o.dryRun); err != nil { return errors.Wrap(err, "error resuming ClusterClasses") } // Reset the pause field on the Cluster object in the target management cluster, so the controllers start reconciling it. log.V(1).Info("Resuming the source cluster") - return setClusterPause(o.fromProxy, clusters, false, o.dryRun) + return setClusterPause(ctx, o.fromProxy, clusters, false, o.dryRun) } -func (o *objectMover) fromDirectory(graph *objectGraph, toProxy Proxy) error { +func (o *objectMover) fromDirectory(ctx context.Context, graph *objectGraph, toProxy Proxy) error { log := logf.Log // Get clusters from graph @@ -424,7 +446,7 @@ func (o *objectMover) fromDirectory(graph *objectGraph, toProxy Proxy) error { // Ensure all the expected target namespaces are in place before creating objects. log.V(1).Info("Creating target namespaces, if missing") - if err := o.ensureNamespaces(graph, toProxy); err != nil { + if err := o.ensureNamespaces(ctx, graph, toProxy); err != nil { return err } @@ -438,7 +460,7 @@ func (o *objectMover) fromDirectory(graph *objectGraph, toProxy Proxy) error { // Create all objects group by group, ensuring all the ownerReferences are re-created. log.Info("Restoring objects into the target cluster") for groupIndex := 0; groupIndex < len(moveSequence.groups); groupIndex++ { - if err := o.restoreGroup(moveSequence.getGroup(groupIndex), toProxy); err != nil { + if err := o.restoreGroup(ctx, moveSequence.getGroup(groupIndex), toProxy); err != nil { return err } } @@ -446,14 +468,14 @@ func (o *objectMover) fromDirectory(graph *objectGraph, toProxy Proxy) error { // Resume reconciling the ClusterClasses after being restored from a backup. // By default, during backup, ClusterClasses are paused so they must be unpaused to be used again log.V(1).Info("Resuming the target ClusterClasses") - if err := setClusterClassPause(toProxy, clusterClasses, false, o.dryRun); err != nil { + if err := setClusterClassPause(ctx, toProxy, clusterClasses, false, o.dryRun); err != nil { return errors.Wrap(err, "error resuming ClusterClasses") } // Resume reconciling the Clusters after being restored from a directory. // By default, when moved to a directory, Clusters are paused, so they must be unpaused to be used again. log.V(1).Info("Resuming the target cluster") - return setClusterPause(toProxy, clusters, false, o.dryRun) + return setClusterPause(ctx, toProxy, clusters, false, o.dryRun) } // moveSequence defines a list of group of moveGroups. @@ -532,7 +554,7 @@ func getMoveSequence(graph *objectGraph) *moveSequence { } // setClusterPause sets the paused field on nodes referring to Cluster objects. -func setClusterPause(proxy Proxy, clusters []*node, value bool, dryRun bool) error { +func setClusterPause(ctx context.Context, proxy Proxy, clusters []*node, value bool, dryRun bool, mutators ...ResourceMutatorFunc) error { if dryRun { return nil } @@ -552,8 +574,8 @@ func setClusterPause(proxy Proxy, clusters []*node, value bool, dryRun bool) err log.V(5).Info("Set Cluster.Spec.Paused", "paused", value, "Cluster", klog.KRef(cluster.identity.Namespace, cluster.identity.Name)) // Nb. The operation is wrapped in a retry loop to make setClusterPause more resilient to unexpected conditions. 
- if err := retryWithExponentialBackoff(setClusterPauseBackoff, func() error { - return patchCluster(proxy, cluster, patch) + if err := retryWithExponentialBackoff(ctx, setClusterPauseBackoff, func(ctx context.Context) error { + return patchCluster(ctx, proxy, cluster, patch, mutators...) }); err != nil { return errors.Wrapf(err, "error setting Cluster.Spec.Paused=%t", value) } @@ -562,7 +584,7 @@ func setClusterPause(proxy Proxy, clusters []*node, value bool, dryRun bool) err } // setClusterClassPause sets the paused annotation on nodes referring to ClusterClass objects. -func setClusterClassPause(proxy Proxy, clusterclasses []*node, pause bool, dryRun bool) error { +func setClusterClassPause(ctx context.Context, proxy Proxy, clusterclasses []*node, pause bool, dryRun bool, mutators ...ResourceMutatorFunc) error { if dryRun { return nil } @@ -579,8 +601,8 @@ func setClusterClassPause(proxy Proxy, clusterclasses []*node, pause bool, dryRu } // Nb. The operation is wrapped in a retry loop to make setClusterClassPause more resilient to unexpected conditions. - if err := retryWithExponentialBackoff(setClusterClassPauseBackoff, func() error { - return pauseClusterClass(proxy, clusterclass, pause) + if err := retryWithExponentialBackoff(ctx, setClusterClassPauseBackoff, func(ctx context.Context) error { + return pauseClusterClass(ctx, proxy, clusterclass, pause, mutators...) }); err != nil { return errors.Wrapf(err, "error updating ClusterClass %s/%s", clusterclass.identity.Namespace, clusterclass.identity.Name) } @@ -588,20 +610,90 @@ func setClusterClassPause(proxy Proxy, clusterclasses []*node, pause bool, dryRu return nil } +func waitReadyForMove(ctx context.Context, proxy Proxy, nodes []*node, dryRun bool, backoff wait.Backoff) error { + if dryRun { + return nil + } + + log := logf.Log + + c, err := proxy.NewClient(ctx) + if err != nil { + return errors.Wrap(err, "error creating client") + } + + for _, n := range nodes { + log := log.WithValues( + "apiVersion", n.identity.GroupVersionKind(), + "resource", klog.ObjectRef{ + Name: n.identity.Name, + Namespace: n.identity.Namespace, + }, + ) + if !n.blockingMove { + log.V(5).Info("Resource not blocking move") + continue + } + + obj := &metav1.PartialObjectMetadata{ + ObjectMeta: metav1.ObjectMeta{ + Name: n.identity.Name, + Namespace: n.identity.Namespace, + }, + TypeMeta: metav1.TypeMeta{ + APIVersion: n.identity.APIVersion, + Kind: n.identity.Kind, + }, + } + key := client.ObjectKeyFromObject(obj) + + blockLogged := false + if err := retryWithExponentialBackoff(ctx, backoff, func(ctx context.Context) error { + if err := c.Get(ctx, key, obj); err != nil { + return errors.Wrapf(err, "error getting %s/%s", obj.GroupVersionKind(), key) + } + + if _, exists := obj.GetAnnotations()[clusterctlv1.BlockMoveAnnotation]; exists { + if !blockLogged { + log.Info(fmt.Sprintf("Move blocked by %s annotation, waiting for it to be removed", clusterctlv1.BlockMoveAnnotation)) + blockLogged = true + } + return errors.Errorf("resource is not ready to move: %s/%s", obj.GroupVersionKind(), key) + } + log.V(5).Info("Resource is ready to move") + return nil + }); err != nil { + return err + } + } + + return nil +} + // patchCluster applies a patch to a node referring to a Cluster object. 
-func patchCluster(proxy Proxy, cluster *node, patch client.Patch) error { - cFrom, err := proxy.NewClient() +func patchCluster(ctx context.Context, proxy Proxy, n *node, patch client.Patch, mutators ...ResourceMutatorFunc) error { + cFrom, err := proxy.NewClient(ctx) if err != nil { return err } - clusterObj := &clusterv1.Cluster{} - clusterObjKey := client.ObjectKey{ - Namespace: cluster.identity.Namespace, - Name: cluster.identity.Name, + // Since the patch has already been generated in the caller of this function, the ONLY effect that mutators can have + // here is on the namespace of the resource. + clusterObj, err := applyMutators(&clusterv1.Cluster{ + TypeMeta: metav1.TypeMeta{ + Kind: clusterv1.ClusterKind, + APIVersion: clusterv1.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: n.identity.Name, + Namespace: n.identity.Namespace, + }, + }, mutators...) + if err != nil { + return err } - if err := cFrom.Get(ctx, clusterObjKey, clusterObj); err != nil { + if err := cFrom.Get(ctx, client.ObjectKeyFromObject(clusterObj), clusterObj); err != nil { return errors.Wrapf(err, "error reading Cluster %s/%s", clusterObj.GetNamespace(), clusterObj.GetName()) } @@ -614,25 +706,42 @@ func pauseClusterClass(proxy Proxy, n *node, pause bool) error { return nil } -func pauseClusterClass(proxy Proxy, n *node, pause bool) error { - cFrom, err := proxy.NewClient() +func pauseClusterClass(ctx context.Context, proxy Proxy, n *node, pause bool, mutators ...ResourceMutatorFunc) error { + cFrom, err := proxy.NewClient(ctx) if err != nil { return errors.Wrap(err, "error creating client") } - // Get the ClusterClass from the server + // Get a mutated copy of the ClusterClass to identify the target namespace. + // The ClusterClass could have been moved to a different namespace by the mutators. + mutatedClusterClass, err := applyMutators(&clusterv1.ClusterClass{ + TypeMeta: metav1.TypeMeta{ + Kind: clusterv1.ClusterClassKind, + APIVersion: clusterv1.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: n.identity.Name, + Namespace: n.identity.Namespace, + }}, mutators...) + if err != nil { + return err + } + clusterClass := &clusterv1.ClusterClass{} + // Construct an object key using the mutatedClusterClass reflecting any changes to the namespace. clusterClassObjKey := client.ObjectKey{ - Name: n.identity.Name, - Namespace: n.identity.Namespace, + Name: mutatedClusterClass.GetName(), + Namespace: mutatedClusterClass.GetNamespace(), } + // Get a copy of the ClusterClass. + // This will ensure that any other changes from the mutator are ignored here as we work with a fresh copy of the ClusterClass. if err := cFrom.Get(ctx, clusterClassObjKey, clusterClass); err != nil { return errors.Wrapf(err, "error reading ClusterClass %s/%s", n.identity.Namespace, n.identity.Name) } patchHelper, err := patch.NewHelper(clusterClass, cFrom) if err != nil { - return errors.Wrapf(err, "error creating patcher for ClusterClass %s/%s", n.identity.Namespace, n.identity.Name) + return err } // Update the annotation to the desired state @@ -650,15 +759,12 @@ func pauseClusterClass(proxy Proxy, n *node, pause bool) error { // Update the ClusterClass with the new annotations.
clusterClass.SetAnnotations(ccAnnotations) - if err := patchHelper.Patch(ctx, clusterClass); err != nil { - return errors.Wrapf(err, "error patching ClusterClass %s/%s", n.identity.Namespace, n.identity.Name) - } - return nil + return patchHelper.Patch(ctx, clusterClass) } // ensureNamespaces ensures all the expected target namespaces are in place before creating objects. -func (o *objectMover) ensureNamespaces(graph *objectGraph, toProxy Proxy) error { +func (o *objectMover) ensureNamespaces(ctx context.Context, graph *objectGraph, toProxy Proxy) error { if o.dryRun { return nil } @@ -679,8 +785,8 @@ func (o *objectMover) ensureNamespaces(graph *objectGraph, toProxy Proxy) error } namespaces.Insert(namespace) - if err := retryWithExponentialBackoff(ensureNamespaceBackoff, func() error { - return o.ensureNamespace(toProxy, namespace) + if err := retryWithExponentialBackoff(ctx, ensureNamespaceBackoff, func(ctx context.Context) error { + return o.ensureNamespace(ctx, toProxy, namespace) }); err != nil { return err } @@ -690,10 +796,10 @@ func (o *objectMover) ensureNamespaces(graph *objectGraph, toProxy Proxy) error } // ensureNamespace ensures a target namespaces is in place before creating objects. -func (o *objectMover) ensureNamespace(toProxy Proxy, namespace string) error { +func (o *objectMover) ensureNamespace(ctx context.Context, toProxy Proxy, namespace string) error { log := logf.Log - cs, err := toProxy.NewClient() + cs, err := toProxy.NewClient(ctx) if err != nil { return err } @@ -735,7 +841,7 @@ func (o *objectMover) ensureNamespace(toProxy Proxy, namespace string) error { return err } - // If the namespace does not exists, create it. + // If the namespace does not exist, create it. ns = &corev1.Namespace{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", @@ -753,15 +859,18 @@ func (o *objectMover) ensureNamespace(toProxy Proxy, namespace string) error { } // createGroup creates all the Kubernetes objects into the target management cluster corresponding to the object graph nodes in a moveGroup. -func (o *objectMover) createGroup(group moveGroup, toProxy Proxy) error { +func (o *objectMover) createGroup(ctx context.Context, group moveGroup, toProxy Proxy, mutators ...ResourceMutatorFunc) error { createTargetObjectBackoff := newWriteBackoff() errList := []error{} + // Maintain a cache of namespaces that have been verified to already exist. + // Nb. This prevents us from making repetitive (and expensive) calls in listing all namespaces to ensure a namespace exists before creating a resource. + existingNamespaces := sets.New[string]() for _, nodeToCreate := range group { // Creates the Kubernetes object corresponding to the nodeToCreate. // Nb. The operation is wrapped in a retry loop to make move more resilient to unexpected conditions. 
- err := retryWithExponentialBackoff(createTargetObjectBackoff, func() error { - return o.createTargetObject(nodeToCreate, toProxy) + err := retryWithExponentialBackoff(ctx, createTargetObjectBackoff, func(ctx context.Context) error { + return o.createTargetObject(ctx, nodeToCreate, toProxy, mutators, existingNamespaces) }) if err != nil { errList = append(errList, err) @@ -775,15 +884,15 @@ func (o *objectMover) createGroup(group moveGroup, toProxy Proxy) error { return nil } -func (o *objectMover) backupGroup(group moveGroup, directory string) error { +func (o *objectMover) backupGroup(ctx context.Context, group moveGroup, directory string) error { backupTargetObjectBackoff := newWriteBackoff() errList := []error{} for _, nodeToBackup := range group { // Backs-up the Kubernetes object corresponding to the nodeToBackup. // Nb. The operation is wrapped in a retry loop to make move more resilient to unexpected conditions. - err := retryWithExponentialBackoff(backupTargetObjectBackoff, func() error { - return o.backupTargetObject(nodeToBackup, directory) + err := retryWithExponentialBackoff(ctx, backupTargetObjectBackoff, func(ctx context.Context) error { + return o.backupTargetObject(ctx, nodeToBackup, directory) }) if err != nil { errList = append(errList, err) @@ -797,15 +906,15 @@ func (o *objectMover) backupGroup(group moveGroup, directory string) error { return nil } -func (o *objectMover) restoreGroup(group moveGroup, toProxy Proxy) error { +func (o *objectMover) restoreGroup(ctx context.Context, group moveGroup, toProxy Proxy) error { restoreTargetObjectBackoff := newWriteBackoff() errList := []error{} for _, nodeToRestore := range group { // Creates the Kubernetes object corresponding to the nodeToRestore. // Nb. The operation is wrapped in a retry loop to make move more resilient to unexpected conditions. - err := retryWithExponentialBackoff(restoreTargetObjectBackoff, func() error { - return o.restoreTargetObject(nodeToRestore, toProxy) + err := retryWithExponentialBackoff(ctx, restoreTargetObjectBackoff, func(ctx context.Context) error { + return o.restoreTargetObject(ctx, nodeToRestore, toProxy) }) if err != nil { errList = append(errList, err) @@ -820,7 +929,7 @@ func (o *objectMover) restoreGroup(group moveGroup, toProxy Proxy) error { } // createTargetObject creates the Kubernetes object in the target Management cluster corresponding to the object graph node, taking care of restoring the OwnerReference with the owner nodes, if any. -func (o *objectMover) createTargetObject(nodeToCreate *node, toProxy Proxy) error { +func (o *objectMover) createTargetObject(ctx context.Context, nodeToCreate *node, toProxy Proxy, mutators []ResourceMutatorFunc, existingNamespaces sets.Set[string]) error { log := logf.Log log.V(1).Info("Creating", nodeToCreate.identity.Kind, nodeToCreate.identity.Name, "Namespace", nodeToCreate.identity.Namespace) @@ -828,7 +937,7 @@ func (o *objectMover) createTargetObject(nodeToCreate *node, toProxy Proxy) erro return nil } - cFrom, err := o.fromProxy.NewClient() + cFrom, err := o.fromProxy.NewClient(ctx) if err != nil { return err } @@ -853,21 +962,32 @@ func (o *objectMover) createTargetObject(nodeToCreate *node, toProxy Proxy) erro // Removes current OwnerReferences obj.SetOwnerReferences(nil) - // Rebuild the owne reference chain + // Rebuild the owner reference chain o.buildOwnerChain(obj, nodeToCreate) // FIXME Workaround for https://github.com/kubernetes/kubernetes/issues/32220. Remove when the issue is fixed. 
 	// If the resource already exists, the API server ordinarily returns an AlreadyExists error. Due to the above issue, if the resource has a non-empty metadata.generateName field, the API server returns a ServerTimeoutError. To ensure that the API server returns an AlreadyExists error, we set the metadata.generateName field to an empty string.
-	if len(obj.GetName()) > 0 && len(obj.GetGenerateName()) > 0 {
+	if obj.GetName() != "" && obj.GetGenerateName() != "" {
 		obj.SetGenerateName("")
 	}

 	// Creates the targetObj into the target management cluster.
-	cTo, err := toProxy.NewClient()
+	cTo, err := toProxy.NewClient(ctx)
 	if err != nil {
 		return err
 	}

+	obj, err = applyMutators(obj, mutators...)
+	if err != nil {
+		return err
+	}
+	// Applying mutators MAY change the namespace, so ensure the namespace exists before creating the resource.
+	if !nodeToCreate.isGlobal && !existingNamespaces.Has(obj.GetNamespace()) {
+		if err = o.ensureNamespace(ctx, toProxy, obj.GetNamespace()); err != nil {
+			return err
+		}
+		existingNamespaces.Insert(obj.GetNamespace())
+	}
 	oldManagedFields := obj.GetManagedFields()
 	if err := cTo.Create(ctx, obj); err != nil {
 		if !apierrors.IsAlreadyExists(err) {
@@ -910,11 +1030,11 @@ func (o *objectMover) createTargetObject(nodeToCreate *node, toProxy Proxy) erro
 	return nil
 }

-func (o *objectMover) backupTargetObject(nodeToCreate *node, directory string) error {
+func (o *objectMover) backupTargetObject(ctx context.Context, nodeToCreate *node, directory string) error {
 	log := logf.Log
 	log.V(1).Info("Saving", nodeToCreate.identity.Kind, nodeToCreate.identity.Name, "Namespace", nodeToCreate.identity.Namespace)

-	cFrom, err := o.fromProxy.NewClient()
+	cFrom, err := o.fromProxy.NewClient(ctx)
 	if err != nil {
 		return err
 	}
@@ -961,12 +1081,12 @@ func (o *objectMover) backupTargetObject(nodeToCreate *node, directory string) e
 	return nil
 }

-func (o *objectMover) restoreTargetObject(nodeToCreate *node, toProxy Proxy) error {
+func (o *objectMover) restoreTargetObject(ctx context.Context, nodeToCreate *node, toProxy Proxy) error {
 	log := logf.Log
 	log.V(1).Info("Restoring", nodeToCreate.identity.Kind, nodeToCreate.identity.Name, "Namespace", nodeToCreate.identity.Namespace)

 	// Creates the targetObj into the target management cluster.
-	cTo, err := toProxy.NewClient()
+	cTo, err := toProxy.NewClient(ctx)
 	if err != nil {
 		return err
 	}
@@ -983,7 +1103,7 @@ func (o *objectMover) restoreTargetObject(nodeToCreate *node, toProxy Proxy) err
 	if err := cTo.Get(ctx, objKey, existingTargetObj); err == nil {
 		log.V(5).Info("Object already exists, skipping moving from directory", nodeToCreate.identity.Kind, nodeToCreate.identity.Name, "Namespace", nodeToCreate.identity.Namespace)

-		// Update the nodes UID since it already exits. Any nodes owned by this existing node will be updated when the owner chain is rebuilt
+		// Update the node's UID since the object already exists. Any nodes owned by this existing node will be updated when the owner chain is rebuilt.
 		nodeToCreate.newUID = existingTargetObj.GetUID()

 		// Return early since the object already exists
@@ -1043,7 +1163,7 @@ func (o *objectMover) buildOwnerChain(obj *unstructured.Unstructured, n *node) {
 	}
 }

 // deleteGroup deletes all the Kubernetes objects from the source management cluster corresponding to the object graph nodes in a moveGroup.
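Note: the delete path below force-deletes source objects with two raw patches: a JSON Patch that marks the object with the delete-for-move annotation, then a merge patch that clears finalizers. As a sketch, assuming clusterctlv1.DeleteForMoveAnnotation resolves to "clusterctl.cluster.x-k8s.io/delete-for-move", the rendered payloads are:

	// Rendered payload of addDeleteForMoveAnnotationPatch (JSON Patch).
	// Careful: per RFC 6902, an "add" op on /metadata/annotations replaces
	// the whole annotations map if one is already present on the object.
	annotationPayload := []byte(`[{"op": "add", "path":"/metadata/annotations", "value":{"clusterctl.cluster.x-k8s.io/delete-for-move":""}}]`)

	// Rendered payload of removeFinalizersPatch (merge patch): clearing the
	// finalizers list lets the pending delete complete immediately.
	finalizersPayload := []byte(`{"metadata":{"finalizers":[]}}`)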
-func (o *objectMover) deleteGroup(group moveGroup) error {
+func (o *objectMover) deleteGroup(ctx context.Context, group moveGroup) error {
 	deleteSourceObjectBackoff := newWriteBackoff()
 	errList := []error{}
 	for i := range group {
@@ -1051,8 +1171,8 @@

 		// Delete the Kubernetes object corresponding to the current node.
 		// Nb. The operation is wrapped in a retry loop to make move more resilient to unexpected conditions.
-		err := retryWithExponentialBackoff(deleteSourceObjectBackoff, func() error {
-			return o.deleteSourceObject(nodeToDelete)
+		err := retryWithExponentialBackoff(ctx, deleteSourceObjectBackoff, func(ctx context.Context) error {
+			return o.deleteSourceObject(ctx, nodeToDelete)
 		})

 		if err != nil {
@@ -1064,12 +1184,13 @@
 }

 var (
-	removeFinalizersPatch = client.RawPatch(types.MergePatchType, []byte("{\"metadata\":{\"finalizers\":[]}}"))
+	removeFinalizersPatch           = client.RawPatch(types.MergePatchType, []byte("{\"metadata\":{\"finalizers\":[]}}"))
+	addDeleteForMoveAnnotationPatch = client.RawPatch(types.JSONPatchType, []byte(fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/metadata/annotations\", \"value\":{%q:\"\"}}]", clusterctlv1.DeleteForMoveAnnotation)))
 )

 // deleteSourceObject deletes the Kubernetes object corresponding to the node from the source management cluster, taking care of removing all the finalizers so
 // the objects gets immediately deleted (force delete).
-func (o *objectMover) deleteSourceObject(nodeToDelete *node) error {
+func (o *objectMover) deleteSourceObject(ctx context.Context, nodeToDelete *node) error {
 	// Don't delete cluster-wide nodes or nodes that are below a hierarchy that starts with a global object (e.g. a secrets owned by a global identity object).
 	if nodeToDelete.isGlobal || nodeToDelete.isGlobalHierarchy {
 		return nil
@@ -1082,7 +1203,7 @@
 		return nil
 	}

-	cFrom, err := o.fromProxy.NewClient()
+	cFrom, err := o.fromProxy.NewClient(ctx)
 	if err != nil {
 		return err
 	}
@@ -1106,6 +1227,11 @@
 			sourceObj.GroupVersionKind(), sourceObj.GetNamespace(), sourceObj.GetName())
 	}

+	if err := cFrom.Patch(ctx, sourceObj, addDeleteForMoveAnnotationPatch); err != nil {
+		return errors.Wrapf(err, "error adding delete-for-move annotation to %q %s/%s",
+			sourceObj.GroupVersionKind(), sourceObj.GetNamespace(), sourceObj.GetName())
+	}
+
 	if len(sourceObj.GetFinalizers()) > 0 {
 		if err := cFrom.Patch(ctx, sourceObj, removeFinalizersPatch); err != nil {
 			return errors.Wrapf(err, "error removing finalizers from %q %s/%s",
@@ -1122,18 +1248,18 @@
 }

 // checkTargetProviders checks that all the providers installed in the source cluster exists in the target cluster as well (with a version >= of the current version).
-func (o *objectMover) checkTargetProviders(toInventory InventoryClient) error {
+func (o *objectMover) checkTargetProviders(ctx context.Context, toInventory InventoryClient) error {
 	if o.dryRun {
 		return nil
 	}

 	// Gets the list of providers in the source/target cluster.
- fromProviders, err := o.fromProviderInventory.List() + fromProviders, err := o.fromProviderInventory.List(ctx) if err != nil { return errors.Wrapf(err, "failed to get provider list from the source cluster") } - toProviders, err := toInventory.List() + toProviders, err := toInventory.List(ctx) if err != nil { return errors.Wrapf(err, "failed to get provider list from the target cluster") } @@ -1188,3 +1314,25 @@ func patchTopologyManagedFields(ctx context.Context, oldManagedFields []metav1.M } return nil } + +// applyMutators applies mutators to an object. +// Note: TypeMeta must always be set in the object because otherwise after conversion the +// resulting Unstructured would have an empty GVK. +func applyMutators(object client.Object, mutators ...ResourceMutatorFunc) (*unstructured.Unstructured, error) { + if object == nil { + return nil, nil + } + u := &unstructured.Unstructured{} + to, err := runtime.DefaultUnstructuredConverter.ToUnstructured(object) + if err != nil { + return nil, err + } + u.SetUnstructuredContent(to) + for _, mutator := range mutators { + if err := mutator(u); err != nil { + return nil, errors.Wrapf(err, "error applying resource mutator to %q %s/%s", + u.GroupVersionKind(), object.GetNamespace(), object.GetName()) + } + } + return u, nil +} diff --git a/cmd/clusterctl/client/cluster/mover_test.go b/cmd/clusterctl/client/cluster/mover_test.go index 56b10250d441..edc3077143db 100644 --- a/cmd/clusterctl/client/cluster/mover_test.go +++ b/cmd/clusterctl/client/cluster/mover_test.go @@ -17,6 +17,7 @@ limitations under the License. package cluster import ( + "context" "fmt" "os" "path/filepath" @@ -29,7 +30,10 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/utils/pointer" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -49,6 +53,32 @@ var moveTests = []struct { wantMoveGroups [][]string wantErr bool }{ + { + name: "Cluster with ClusterClass", + fields: moveTestsFields{ + objs: func() []client.Object { + objs := test.NewFakeClusterClass("ns1", "class1").Objs() + objs = append(objs, test.NewFakeCluster("ns1", "foo").WithTopologyClass("class1").Objs()...) + return deduplicateObjects(objs) + }(), + }, + wantMoveGroups: [][]string{ + { // group 1 + "cluster.x-k8s.io/v1beta1, Kind=ClusterClass, ns1/class1", + }, + { // group 2 + "infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureClusterTemplate, ns1/class1", + "controlplane.cluster.x-k8s.io/v1beta1, Kind=GenericControlPlaneTemplate, ns1/class1", + "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/foo", + }, + { // group 3 + "/v1, Kind=Secret, ns1/foo-ca", + "/v1, Kind=Secret, ns1/foo-kubeconfig", + "infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/foo", + }, + }, + wantErr: false, + }, { name: "Cluster", fields: moveTestsFields{ @@ -221,6 +251,54 @@ var moveTests = []struct { }, wantErr: false, }, + { + name: "Cluster with MachineDeployment with a static bootstrap config", + fields: moveTestsFields{ + objs: test.NewFakeCluster("ns1", "cluster1"). + WithMachineDeployments( + test.NewFakeMachineDeployment("md1"). + WithStaticBootstrapConfig(). + WithMachineSets( + test.NewFakeMachineSet("ms1"). + WithStaticBootstrapConfig(). + WithMachines( + test.NewFakeMachine("m1"). 
+ WithStaticBootstrapConfig(), + test.NewFakeMachine("m2"). + WithStaticBootstrapConfig(), + ), + ), + ).Objs(), + }, + wantMoveGroups: [][]string{ + { // group 1 + "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1", + }, + { // group 2 (objects with ownerReferences in group 1) + // owned by Clusters + "/v1, Kind=Secret, ns1/cluster1-ca", + "/v1, Kind=Secret, ns1/cluster1-kubeconfig", + "cluster.x-k8s.io/v1beta1, Kind=MachineDeployment, ns1/md1", + "infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/cluster1", + "infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachineTemplate, ns1/md1", + }, + { // group 3 (objects with ownerReferences in group 1,2) + // owned by MachineDeployments + "cluster.x-k8s.io/v1beta1, Kind=MachineSet, ns1/ms1", + }, + { // group 4 (objects with ownerReferences in group 1,2,3) + // owned by MachineSets + "cluster.x-k8s.io/v1beta1, Kind=Machine, ns1/m1", + "cluster.x-k8s.io/v1beta1, Kind=Machine, ns1/m2", + }, + { // group 5 (objects with ownerReferences in group 1,2,3,4) + // owned by Machines + "infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachine, ns1/m1", + "infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachine, ns1/m2", + }, + }, + wantErr: false, + }, { name: "Cluster with Control Plane", fields: moveTestsFields{ @@ -651,14 +729,16 @@ func Test_objectMover_backupTargetObject(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + // Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test. graph := getObjectGraphWithObjs(tt.fields.objs) // Get all the types to be considered for discovery - g.Expect(getFakeDiscoveryTypes(graph)).To(Succeed()) + g.Expect(graph.getDiscoveryTypes(ctx)).To(Succeed()) // trigger discovery the content of the source cluster - g.Expect(graph.Discovery("")).To(Succeed()) + g.Expect(graph.Discovery(ctx, "")).To(Succeed()) // Run backupTargetObject on nodes in graph mover := objectMover{ @@ -672,13 +752,13 @@ func Test_objectMover_backupTargetObject(t *testing.T) { defer os.RemoveAll(dir) for _, node := range graph.uidToNode { - err = mover.backupTargetObject(node, dir) + err = mover.backupTargetObject(ctx, node, dir) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) // objects are stored and serialized correctly in the temporary directory expectedFilename := node.getFilename() @@ -690,13 +770,13 @@ func Test_objectMover_backupTargetObject(t *testing.T) { path := filepath.Join(dir, expectedFilename) fileContents, err := os.ReadFile(path) //nolint:gosec if err != nil { - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) return } firstFileStat, err := os.Stat(path) if err != nil { - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) return } @@ -707,17 +787,17 @@ func Test_objectMover_backupTargetObject(t *testing.T) { time.Sleep(time.Millisecond * 50) // Running backupTargetObject should override any existing files since it represents a new toDirectory - err = mover.backupTargetObject(node, dir) + err = mover.backupTargetObject(ctx, node, dir) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) secondFileStat, err := os.Stat(path) if err != nil { - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) return } @@ -733,10 +813,12 @@ 
func Test_objectMover_restoreTargetObject(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + // temporary directory dir, err := os.MkdirTemp("/tmp", "cluster-api") if err != nil { - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) } defer os.RemoveAll(dir) @@ -744,10 +826,10 @@ func Test_objectMover_restoreTargetObject(t *testing.T) { graph := getObjectGraph() // Get all the types to be considered for discovery - g.Expect(getFakeDiscoveryTypes(graph)).To(Succeed()) + g.Expect(graph.getDiscoveryTypes(ctx)).To(Succeed()) // trigger discovery the content of the source cluster - g.Expect(graph.Discovery("")).To(Succeed()) + g.Expect(graph.Discovery(ctx, "")).To(Succeed()) // gets a fakeProxy to an empty cluster with all the required CRDs toProxy := getFakeProxyWithCRDs() @@ -760,32 +842,32 @@ func Test_objectMover_restoreTargetObject(t *testing.T) { // Write go string slice to directory for _, file := range tt.files { tempFile, err := os.CreateTemp(dir, "obj") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) _, err = tempFile.WriteString(file) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(tempFile.Close()).To(Succeed()) } objs, err := mover.filesToObjs(dir) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) for i := range objs { - g.Expect(graph.addRestoredObj(&objs[i])).NotTo(HaveOccurred()) + g.Expect(graph.addRestoredObj(&objs[i])).ToNot(HaveOccurred()) } for _, node := range graph.uidToNode { - err = mover.restoreTargetObject(node, toProxy) + err = mover.restoreTargetObject(ctx, node, toProxy) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) // Check objects are in new restored cluster - csTo, err := toProxy.NewClient() - g.Expect(err).NotTo(HaveOccurred()) + csTo, err := toProxy.NewClient(ctx) + g.Expect(err).ToNot(HaveOccurred()) key := client.ObjectKey{ Namespace: node.identity.Namespace, @@ -798,22 +880,22 @@ func Test_objectMover_restoreTargetObject(t *testing.T) { oTo.SetKind(node.identity.Kind) if err := csTo.Get(ctx, key, oTo); err != nil { - t.Errorf("error = %v when checking for %v created in target cluster", err, key) + t.Errorf("error = %v when checking for %s %v created in target cluster", err, oTo.GetKind(), key) continue } // Re-running restoreTargetObjects won't override existing objects - err = mover.restoreTargetObject(node, toProxy) + err = mover.restoreTargetObject(ctx, node, toProxy) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) // Check objects are in new restored cluster - csAfter, err := toProxy.NewClient() - g.Expect(err).NotTo(HaveOccurred()) + csAfter, err := toProxy.NewClient(ctx) + g.Expect(err).ToNot(HaveOccurred()) keyAfter := client.ObjectKey{ Namespace: node.identity.Namespace, @@ -826,7 +908,7 @@ func Test_objectMover_restoreTargetObject(t *testing.T) { oAfter.SetKind(node.identity.Kind) if err := csAfter.Get(ctx, keyAfter, oAfter); err != nil { - t.Errorf("error = %v when checking for %v created in target cluster", err, key) + t.Errorf("error = %v when checking for %s %v created in target cluster", err, oAfter.GetKind(), key) continue } @@ -834,7 +916,7 @@ func Test_objectMover_restoreTargetObject(t *testing.T) { g.Expect(oAfter.GetName()).Should(Equal(oTo.GetName())) 
g.Expect(oAfter.GetCreationTimestamp()).Should(Equal(oTo.GetCreationTimestamp())) g.Expect(oAfter.GetUID()).Should(Equal(oTo.GetUID())) - g.Expect(oAfter.GetOwnerReferences()).Should(Equal(oTo.GetOwnerReferences())) + g.Expect(oAfter.GetOwnerReferences()).Should(BeComparableTo(oTo.GetOwnerReferences())) } }) } @@ -846,14 +928,16 @@ func Test_objectMover_toDirectory(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + // Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test. graph := getObjectGraphWithObjs(tt.fields.objs) // Get all the types to be considered for discovery - g.Expect(getFakeDiscoveryTypes(graph)).To(Succeed()) + g.Expect(graph.getDiscoveryTypes(ctx)).To(Succeed()) // trigger discovery the content of the source cluster - g.Expect(graph.Discovery("")).To(Succeed()) + g.Expect(graph.Discovery(ctx, "")).To(Succeed()) // Run toDirectory mover := objectMover{ @@ -866,17 +950,17 @@ func Test_objectMover_toDirectory(t *testing.T) { } defer os.RemoveAll(dir) - err = mover.toDirectory(graph, dir) + err = mover.toDirectory(ctx, graph, dir) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) // check that the objects are stored in the temporary directory but not deleted from the source cluster - csFrom, err := graph.proxy.NewClient() - g.Expect(err).NotTo(HaveOccurred()) + csFrom, err := graph.proxy.NewClient(ctx) + g.Expect(err).ToNot(HaveOccurred()) missingFiles := []string{} for _, node := range graph.uidToNode { @@ -891,11 +975,11 @@ func Test_objectMover_toDirectory(t *testing.T) { oFrom.SetKind(node.identity.Kind) err := csFrom.Get(ctx, key, oFrom) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) // objects are stored in the temporary directory with the expected filename files, err := os.ReadDir(dir) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) expectedFilename := node.getFilename() found := false @@ -935,7 +1019,7 @@ func Test_objectMover_filesToObjs(t *testing.T) { } _, err = file.WriteString(tt.files[fileName]) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(file.Close()).To(Succeed()) } @@ -953,7 +1037,7 @@ func Test_objectMover_filesToObjs(t *testing.T) { return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) missingObjs := []unstructured.Unstructured{} for _, obj := range objs { @@ -980,10 +1064,12 @@ func Test_objectMover_fromDirectory(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + // temporary directory dir, err := os.MkdirTemp("/tmp", "cluster-api") if err != nil { - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) } defer os.RemoveAll(dir) @@ -991,7 +1077,7 @@ func Test_objectMover_fromDirectory(t *testing.T) { graph := getObjectGraph() // Get all the types to be considered for discovery - g.Expect(getFakeDiscoveryTypes(graph)).To(Succeed()) + g.Expect(graph.getDiscoveryTypes(ctx)).To(Succeed()) // gets a fakeProxy to an empty cluster with all the required CRDs toProxy := getFakeProxyWithCRDs() @@ -1004,18 +1090,18 @@ func Test_objectMover_fromDirectory(t *testing.T) { // Write go string slice to directory for _, file := range tt.files { tempFile, err := os.CreateTemp(dir, "obj") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) _, err = tempFile.WriteString(file) - 
g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(tempFile.Close()).To(Succeed()) } objs, err := mover.filesToObjs(dir) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) for i := range objs { - g.Expect(graph.addRestoredObj(&objs[i])).NotTo(HaveOccurred()) + g.Expect(graph.addRestoredObj(&objs[i])).ToNot(HaveOccurred()) } // fromDirectory works on the target cluster which does not yet have objs to discover @@ -1025,17 +1111,17 @@ func Test_objectMover_fromDirectory(t *testing.T) { graph.setTenants() graph.checkVirtualNode() - err = mover.fromDirectory(graph, toProxy) + err = mover.fromDirectory(ctx, graph, toProxy) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) // Check objects are in new restored cluster - csTo, err := toProxy.NewClient() - g.Expect(err).NotTo(HaveOccurred()) + csTo, err := toProxy.NewClient(ctx) + g.Expect(err).ToNot(HaveOccurred()) for _, node := range graph.uidToNode { key := client.ObjectKey{ @@ -1049,7 +1135,7 @@ func Test_objectMover_fromDirectory(t *testing.T) { oTo.SetKind(node.identity.Kind) if err := csTo.Get(ctx, key, oTo); err != nil { - t.Errorf("error = %v when checking for %v created in target cluster", err, key) + t.Errorf("error = %v when checking for %s %v created in target cluster", err, oTo.GetKind(), key) continue } } @@ -1063,14 +1149,16 @@ func Test_getMoveSequence(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + // Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test. graph := getObjectGraphWithObjs(tt.fields.objs) // Get all the types to be considered for discovery - g.Expect(getFakeDiscoveryTypes(graph)).To(Succeed()) + g.Expect(graph.getDiscoveryTypes(ctx)).To(Succeed()) // trigger discovery the content of the source cluster - g.Expect(graph.Discovery("")).To(Succeed()) + g.Expect(graph.Discovery(ctx, "")).To(Succeed()) moveSequence := getMoveSequence(graph) g.Expect(moveSequence.groups).To(HaveLen(len(tt.wantMoveGroups))) @@ -1094,14 +1182,16 @@ func Test_objectMover_move_dryRun(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + // Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test. 
 			graph := getObjectGraphWithObjs(tt.fields.objs)

 			// Get all the types to be considered for discovery
-			g.Expect(getFakeDiscoveryTypes(graph)).To(Succeed())
+			g.Expect(graph.getDiscoveryTypes(ctx)).To(Succeed())

 			// trigger discovery the content of the source cluster
-			g.Expect(graph.Discovery("")).To(Succeed())
+			g.Expect(graph.Discovery(ctx, "")).To(Succeed())

 			// gets a fakeProxy to an empty cluster with all the required CRDs
 			toProxy := getFakeProxyWithCRDs()
@@ -1112,20 +1202,20 @@ func Test_objectMover_move_dryRun(t *testing.T) {
 				dryRun:    true,
 			}

-			err := mover.move(graph, toProxy)
+			err := mover.move(ctx, graph, toProxy, nil)
 			if tt.wantErr {
 				g.Expect(err).To(HaveOccurred())
 				return
 			}

-			g.Expect(err).NotTo(HaveOccurred())
+			g.Expect(err).ToNot(HaveOccurred())

 			// check that the objects are kept in the source cluster and are not created in the target cluster
-			csFrom, err := graph.proxy.NewClient()
-			g.Expect(err).NotTo(HaveOccurred())
+			csFrom, err := graph.proxy.NewClient(ctx)
+			g.Expect(err).ToNot(HaveOccurred())

-			csTo, err := toProxy.NewClient()
-			g.Expect(err).NotTo(HaveOccurred())
+			csTo, err := toProxy.NewClient(ctx)
+			g.Expect(err).ToNot(HaveOccurred())
 			for _, node := range graph.uidToNode {
 				key := client.ObjectKey{
 					Namespace: node.identity.Namespace,
@@ -1137,7 +1227,7 @@ func Test_objectMover_move_dryRun(t *testing.T) {
 				oFrom.SetKind(node.identity.Kind)

 				if err := csFrom.Get(ctx, key, oFrom); err != nil {
-					t.Errorf("error = %v when checking for %v kept in source cluster", err, key)
+					t.Errorf("error = %v when checking for %s %v kept in source cluster", err, oFrom.GetKind(), key)
 					continue
 				}
@@ -1149,11 +1239,11 @@ func Test_objectMover_move_dryRun(t *testing.T) {
 				err := csTo.Get(ctx, key, oTo)
 				if err == nil {
 					if oFrom.GetNamespace() != "" {
-						t.Errorf("%v created in target cluster which should not", key)
+						t.Errorf("%s %v was created in target cluster but should not have been", oFrom.GetKind(), key)
 						continue
 					}
 				} else if !apierrors.IsNotFound(err) {
-					t.Errorf("error = %v when checking for %v should not created ojects in target cluster", err, key)
+					t.Errorf("error = %v when checking that %s %v was not created in target cluster", err, oFrom.GetKind(), key)
 					continue
 				}
 			}
@@ -1167,14 +1257,16 @@ func Test_objectMover_move(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			g := NewWithT(t)

+			ctx := context.Background()
+
 			// Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test.
 			graph := getObjectGraphWithObjs(tt.fields.objs)

 			// Get all the types to be considered for discovery
-			g.Expect(getFakeDiscoveryTypes(graph)).To(Succeed())
+			g.Expect(graph.getDiscoveryTypes(ctx)).To(Succeed())

 			// trigger discovery the content of the source cluster
-			g.Expect(graph.Discovery("")).To(Succeed())
+			g.Expect(graph.Discovery(ctx, "")).To(Succeed())

 			// gets a fakeProxy to an empty cluster with all the required CRDs
 			toProxy := getFakeProxyWithCRDs()
@@ -1183,21 +1275,133 @@ func Test_objectMover_move(t *testing.T) {
 			mover := objectMover{
 				fromProxy: graph.proxy,
 			}
+			err := mover.move(ctx, graph, toProxy)
+
+			if tt.wantErr {
+				g.Expect(err).To(HaveOccurred())
+				return
+			}
+
+			g.Expect(err).ToNot(HaveOccurred())
+
+			// check that the objects are removed from the source cluster and are created in the target cluster
+			csFrom, err := graph.proxy.NewClient(ctx)
+			g.Expect(err).ToNot(HaveOccurred())
+
+			csTo, err := toProxy.NewClient(ctx)
+			g.Expect(err).ToNot(HaveOccurred())
+
+			for _, node := range graph.uidToNode {
+				key := client.ObjectKey{
+					Namespace: node.identity.Namespace,
+					Name:      node.identity.Name,
+				}
+
+				// objects are deleted from the source cluster
+				oFrom := &unstructured.Unstructured{}
+				oFrom.SetAPIVersion(node.identity.APIVersion)
+				oFrom.SetKind(node.identity.Kind)
+
+				err := csFrom.Get(ctx, key, oFrom)
+				if err == nil {
+					if !node.isGlobal && !node.isGlobalHierarchy {
+						t.Errorf("%s %v not deleted in source cluster", oFrom.GetKind(), key)
+						continue
+					}
+				} else if !apierrors.IsNotFound(err) {
+					t.Errorf("error = %v when checking for %s %v deleted in source cluster", err, oFrom.GetKind(), key)
+					continue
+				}
+
+				// objects are created in the target cluster
+				oTo := &unstructured.Unstructured{}
+				oTo.SetAPIVersion(node.identity.APIVersion)
+				oTo.SetKind(node.identity.Kind)
+
+				if err := csTo.Get(ctx, key, oTo); err != nil {
+					t.Errorf("error = %v when checking for %s %v created in target cluster", err, oFrom.GetKind(), key)
+					continue
+				}
+			}
+		})
+	}
+}
+
+func Test_objectMover_move_with_Mutator(t *testing.T) {
+	// NB. we are testing the move and move sequence using the same set of moveTests, but checking the results at different stages of the move process.
+	// We use the same mutator function for all tests and validate the outcome based on the input.
+	for _, tt := range moveTests {
+		t.Run(tt.name, func(t *testing.T) {
+			g := NewWithT(t)
+
+			ctx := context.Background()
+
+			toNamespace := "foobar"
+			updateKnownKinds := map[string][][]string{
+				"Cluster": {
+					{"metadata", "namespace"},
+					{"spec", "controlPlaneRef", "namespace"},
+					{"spec", "infrastructureRef", "namespace"},
+					{"unknown", "field", "does", "not", "cause", "errors"},
+				},
+				"KubeadmControlPlane": {
+					{"spec", "machineTemplate", "infrastructureRef", "namespace"},
+				},
+				"Machine": {
+					{"spec", "bootstrap", "configRef", "namespace"},
+					{"spec", "infrastructureRef", "namespace"},
+				},
+			}
+			var namespaceMutator ResourceMutatorFunc = func(u *unstructured.Unstructured) error {
+				if u == nil || u.Object == nil {
+					return nil
+				}
+				if u.GetNamespace() != "" {
+					u.SetNamespace(toNamespace)
+				}
+				if fields, knownKind := updateKnownKinds[u.GetKind()]; knownKind {
+					for _, nsField := range fields {
+						_, exists, err := unstructured.NestedFieldNoCopy(u.Object, nsField...)
+ g.Expect(err).ToNot(HaveOccurred()) + if exists { + g.Expect(unstructured.SetNestedField(u.Object, toNamespace, nsField...)).To(Succeed()) + } + } + } + return nil + } + + // Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test. + graph := getObjectGraphWithObjs(tt.fields.objs) + + // Get all the types to be considered for discovery + g.Expect(graph.getDiscoveryTypes(ctx)).To(Succeed()) + + // trigger discovery the content of the source cluster + g.Expect(graph.Discovery(ctx, "")).To(Succeed()) + + // gets a fakeProxy to an empty cluster with all the required CRDs + toProxy := getFakeProxyWithCRDs() + + // Run move with mutators + mover := objectMover{ + fromProxy: graph.proxy, + } - err := mover.move(graph, toProxy) + err := mover.move(ctx, graph, toProxy, namespaceMutator) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) // check that the objects are removed from the source cluster and are created in the target cluster - csFrom, err := graph.proxy.NewClient() - g.Expect(err).NotTo(HaveOccurred()) + csFrom, err := graph.proxy.NewClient(ctx) + g.Expect(err).ToNot(HaveOccurred()) - csTo, err := toProxy.NewClient() - g.Expect(err).NotTo(HaveOccurred()) + csTo, err := toProxy.NewClient(ctx) + g.Expect(err).ToNot(HaveOccurred()) for _, node := range graph.uidToNode { key := client.ObjectKey{ @@ -1213,11 +1417,11 @@ func Test_objectMover_move(t *testing.T) { err := csFrom.Get(ctx, key, oFrom) if err == nil { if !node.isGlobal && !node.isGlobalHierarchy { - t.Errorf("%v not deleted in source cluster", key) + t.Errorf("%s %v not deleted in source cluster", oFrom.GetKind(), key) continue } } else if !apierrors.IsNotFound(err) { - t.Errorf("error = %v when checking for %v deleted in source cluster", err, key) + t.Errorf("error = %v when checking for %s %v deleted in source cluster", err, oFrom.GetKind(), key) continue } @@ -1225,11 +1429,23 @@ func Test_objectMover_move(t *testing.T) { oTo := &unstructured.Unstructured{} oTo.SetAPIVersion(node.identity.APIVersion) oTo.SetKind(node.identity.Kind) + if !node.isGlobal { + key.Namespace = toNamespace + } if err := csTo.Get(ctx, key, oTo); err != nil { - t.Errorf("error = %v when checking for %v created in target cluster", err, key) + t.Errorf("error = %v when checking for %s %v created in target cluster", err, oFrom.GetKind(), key) continue } + if fields, knownKind := updateKnownKinds[oTo.GetKind()]; knownKind { + for _, nsField := range fields { + value, exists, err := unstructured.NestedFieldNoCopy(oTo.Object, nsField...) + g.Expect(err).ToNot(HaveOccurred()) + if exists { + g.Expect(value).To(Equal(toNamespace)) + } + } + } } }) } @@ -1438,23 +1654,25 @@ func Test_objectMover_checkProvisioningCompleted(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + // Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test. 
graph := getObjectGraphWithObjs(tt.fields.objs) // Get all the types to be considered for discovery - g.Expect(getFakeDiscoveryTypes(graph)).To(Succeed()) + g.Expect(graph.getDiscoveryTypes(ctx)).To(Succeed()) // trigger discovery the content of the source cluster - g.Expect(graph.Discovery("")).To(Succeed()) + g.Expect(graph.Discovery(ctx, "")).To(Succeed()) o := &objectMover{ fromProxy: graph.proxy, } - err := o.checkProvisioningCompleted(graph) + err := o.checkProvisioningCompleted(ctx, graph) if tt.wantErr { g.Expect(err).To(HaveOccurred()) } else { - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) } }) } @@ -1529,14 +1747,16 @@ func Test_objectsMoverService_checkTargetProviders(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + o := &objectMover{ fromProviderInventory: newInventoryClient(tt.fields.fromProxy, nil), } - err := o.checkTargetProviders(newInventoryClient(tt.args.toProxy, nil)) + err := o.checkTargetProviders(ctx, newInventoryClient(tt.args.toProxy, nil)) if tt.wantErr { g.Expect(err).To(HaveOccurred()) } else { - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) } }) } @@ -1582,16 +1802,18 @@ func Test_objectMoverService_ensureNamespace(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + mover := objectMover{ fromProxy: test.NewFakeProxy(), } - err := mover.ensureNamespace(tt.args.toProxy, tt.args.namespace) - g.Expect(err).NotTo(HaveOccurred()) + err := mover.ensureNamespace(ctx, tt.args.toProxy, tt.args.namespace) + g.Expect(err).ToNot(HaveOccurred()) // Check that the namespaces either existed or were created in the // target. - csTo, err := tt.args.toProxy.NewClient() + csTo, err := tt.args.toProxy.NewClient(ctx) g.Expect(err).ToNot(HaveOccurred()) ns := &corev1.Namespace{} @@ -1679,24 +1901,26 @@ func Test_objectMoverService_ensureNamespaces(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + graph := getObjectGraphWithObjs(tt.fields.objs) // Get all the types to be considered for discovery - g.Expect(getFakeDiscoveryTypes(graph)).To(Succeed()) + g.Expect(graph.getDiscoveryTypes(ctx)).To(Succeed()) // Trigger discovery the content of the source cluster - g.Expect(graph.Discovery("")).To(Succeed()) + g.Expect(graph.Discovery(ctx, "")).To(Succeed()) mover := objectMover{ fromProxy: graph.proxy, } - err := mover.ensureNamespaces(graph, tt.args.toProxy) - g.Expect(err).NotTo(HaveOccurred()) + err := mover.ensureNamespaces(ctx, graph, tt.args.toProxy) + g.Expect(err).ToNot(HaveOccurred()) // Check that the namespaces either existed or were created in the // target. 
- csTo, err := tt.args.toProxy.NewClient() + csTo, err := tt.args.toProxy.NewClient(ctx) g.Expect(err).ToNot(HaveOccurred()) namespaces := &corev1.NamespaceList{} @@ -1791,20 +2015,25 @@ func Test_createTargetObject(t *testing.T) { APIVersion: "cluster.x-k8s.io/v1beta1", }, }: { - Controller: pointer.Bool(true), + Controller: ptr.To(true), }, }, }, }, want: func(g *WithT, toClient client.Client) { + ns := &corev1.Namespace{} + nsKey := client.ObjectKey{ + Name: "ns1", + } + g.Expect(toClient.Get(context.Background(), nsKey, ns)).To(Succeed()) c := &clusterv1.Cluster{} key := client.ObjectKey{ Namespace: "ns1", Name: "foo", } - g.Expect(toClient.Get(ctx, key, c)).ToNot(HaveOccurred()) + g.Expect(toClient.Get(context.Background(), key, c)).ToNot(HaveOccurred()) g.Expect(c.OwnerReferences).To(HaveLen(1)) - g.Expect(c.OwnerReferences[0].Controller).To(Equal(pointer.Bool(true))) + g.Expect(c.OwnerReferences[0].Controller).To(Equal(ptr.To(true))) }, }, { @@ -1842,7 +2071,7 @@ func Test_createTargetObject(t *testing.T) { Namespace: "ns1", Name: "foo", } - g.Expect(toClient.Get(ctx, key, c)).ToNot(HaveOccurred()) + g.Expect(toClient.Get(context.Background(), key, c)).ToNot(HaveOccurred()) g.Expect(c.Annotations).To(BeEmpty()) }, }, @@ -1878,7 +2107,7 @@ func Test_createTargetObject(t *testing.T) { key := client.ObjectKey{ Name: "foo", } - g.Expect(toClient.Get(ctx, key, c)).ToNot(HaveOccurred()) + g.Expect(toClient.Get(context.Background(), key, c)).ToNot(HaveOccurred()) g.Expect(c.Annotations).ToNot(BeEmpty()) }, }, @@ -1918,7 +2147,7 @@ func Test_createTargetObject(t *testing.T) { Namespace: "ns1", Name: "foo", } - g.Expect(toClient.Get(ctx, key, c)).ToNot(HaveOccurred()) + g.Expect(toClient.Get(context.Background(), key, c)).ToNot(HaveOccurred()) g.Expect(c.Annotations).ToNot(BeEmpty()) }, }, @@ -1928,19 +2157,21 @@ func Test_createTargetObject(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + mover := objectMover{ fromProxy: tt.args.fromProxy, } - err := mover.createTargetObject(tt.args.node, tt.args.toProxy) + err := mover.createTargetObject(ctx, tt.args.node, tt.args.toProxy, nil, sets.New[string]()) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) - toClient, err := tt.args.toProxy.NewClient() - g.Expect(err).NotTo(HaveOccurred()) + toClient, err := tt.args.toProxy.NewClient(ctx) + g.Expect(err).ToNot(HaveOccurred()) tt.want(g, toClient) }) @@ -1977,7 +2208,7 @@ func Test_deleteSourceObject(t *testing.T) { Namespace: "ns1", Name: "foo", } - g.Expect(apierrors.IsNotFound(toClient.Get(ctx, key, c))).To(BeTrue()) + g.Expect(apierrors.IsNotFound(toClient.Get(context.Background(), key, c))).To(BeTrue()) }, }, { @@ -2006,7 +2237,7 @@ func Test_deleteSourceObject(t *testing.T) { Namespace: "ns1", Name: "foo", } - g.Expect(apierrors.IsNotFound(toClient.Get(ctx, key, c))).To(BeTrue()) + g.Expect(apierrors.IsNotFound(toClient.Get(context.Background(), key, c))).To(BeTrue()) }, }, { @@ -2034,7 +2265,7 @@ func Test_deleteSourceObject(t *testing.T) { Namespace: "ns1", Name: "foo", } - g.Expect(apierrors.IsNotFound(toClient.Get(ctx, key, c))).To(BeTrue()) + g.Expect(apierrors.IsNotFound(toClient.Get(context.Background(), key, c))).To(BeTrue()) }, }, { @@ -2064,7 +2295,7 @@ func Test_deleteSourceObject(t *testing.T) { Namespace: "ns1", Name: "foo", } - g.Expect(apierrors.IsNotFound(toClient.Get(ctx, key, c))).To(BeTrue()) + 
g.Expect(apierrors.IsNotFound(toClient.Get(context.Background(), key, c))).To(BeTrue()) }, }, } @@ -2073,17 +2304,107 @@ func Test_deleteSourceObject(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + mover := objectMover{ fromProxy: tt.args.fromProxy, } - err := mover.deleteSourceObject(tt.args.node) - g.Expect(err).NotTo(HaveOccurred()) + err := mover.deleteSourceObject(ctx, tt.args.node) + g.Expect(err).ToNot(HaveOccurred()) - fromClient, err := tt.args.fromProxy.NewClient() - g.Expect(err).NotTo(HaveOccurred()) + fromClient, err := tt.args.fromProxy.NewClient(ctx) + g.Expect(err).ToNot(HaveOccurred()) tt.want(g, fromClient) }) } } + +func TestWaitReadyForMove(t *testing.T) { + tests := []struct { + name string + moveBlocked bool + doUnblock bool + wantErr bool + }{ + { + name: "moving blocked cluster should fail", + moveBlocked: true, + wantErr: true, + }, + { + name: "moving unblocked cluster should succeed", + moveBlocked: false, + wantErr: false, + }, + { + name: "moving blocked cluster that is eventually unblocked should succeed", + moveBlocked: true, + doUnblock: true, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + clusterName := "foo" + clusterNamespace := "ns1" + objs := test.NewFakeCluster(clusterNamespace, clusterName).Objs() + + ctx := context.Background() + + // Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test. + graph := getObjectGraphWithObjs(objs) + + if tt.moveBlocked { + c, err := graph.proxy.NewClient(ctx) + g.Expect(err).NotTo(HaveOccurred()) + + cluster := &clusterv1.Cluster{} + err = c.Get(ctx, types.NamespacedName{Namespace: clusterNamespace, Name: clusterName}, cluster) + g.Expect(err).NotTo(HaveOccurred()) + anns := cluster.GetAnnotations() + if anns == nil { + anns = make(map[string]string) + } + anns[clusterctlv1.BlockMoveAnnotation] = "anything" + cluster.SetAnnotations(anns) + + g.Expect(c.Update(ctx, cluster)).To(Succeed()) + + if tt.doUnblock { + go func() { + time.Sleep(50 * time.Millisecond) + delete(cluster.Annotations, clusterctlv1.BlockMoveAnnotation) + g.Expect(c.Update(ctx, cluster)).To(Succeed()) + }() + } + } + + // Get all the types to be considered for discovery + g.Expect(graph.getDiscoveryTypes(ctx)).To(Succeed()) + + // trigger discovery the content of the source cluster + g.Expect(graph.Discovery(ctx, "")).To(Succeed()) + + backoff := wait.Backoff{ + Steps: 1, + } + if tt.doUnblock { + backoff = wait.Backoff{ + Duration: 20 * time.Millisecond, + Steps: 10, + } + } + err := waitReadyForMove(ctx, graph.proxy, graph.getMoveNodes(), false, backoff) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + }) + } +} diff --git a/cmd/clusterctl/client/cluster/objectgraph.go b/cmd/clusterctl/client/cluster/objectgraph.go index 3d6c86e1fc19..90f072e20d19 100644 --- a/cmd/clusterctl/client/cluster/objectgraph.go +++ b/cmd/clusterctl/client/cluster/objectgraph.go @@ -17,6 +17,7 @@ limitations under the License. package cluster import ( + "context" "fmt" "strings" @@ -37,6 +38,7 @@ import ( ) const clusterTopologyNameKey = "cluster.spec.topology.class" +const clusterResourceSetBindingClusterNameKey = "clusterresourcesetbinding.spec.clustername" type empty struct{} @@ -90,6 +92,10 @@ type node struct { // E.g. for the cluster object we capture information to see if the cluster uses a manged topology // and the cluster class used. 
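	// For example (illustrative values): a Cluster using a managed topology gets
	// {"cluster.spec.topology.class": "<class name>"} and, per the capture logic
	// added below, a ClusterResourceSetBinding gets
	// {"clusterresourcesetbinding.spec.clustername": "<cluster name>"},
	// keyed by the two constants defined at the top of this file.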
 	additionalInfo map[string]interface{}
+
+	// blockingMove is true when the object should prevent a move operation from proceeding as indicated by
+	// the presence of the block-move annotation.
+	blockingMove bool
 }

 type discoveryTypeInfo struct {
@@ -146,6 +152,17 @@ func (n *node) captureAdditionalInformation(obj *unstructured.Unstructured) erro
 		}
 	}

+	// If the node is a ClusterResourceSetBinding, capture the name of the cluster it references.
+	if n.identity.GroupVersionKind().GroupKind() == addonsv1.GroupVersion.WithKind("ClusterResourceSetBinding").GroupKind() {
+		binding := &addonsv1.ClusterResourceSetBinding{}
+		if err := localScheme.Convert(obj, binding, nil); err != nil {
+			return errors.Wrapf(err, "failed to convert object %s to ClusterResourceSetBinding", n.identityStr())
+		}
+		if n.additionalInfo == nil {
+			n.additionalInfo = map[string]interface{}{}
+		}
+		n.additionalInfo[clusterResourceSetBindingClusterNameKey] = binding.Spec.ClusterName
+	}
 	return nil
 }

@@ -307,15 +324,17 @@ func (o *objectGraph) objMetaToNode(obj *unstructured.Unstructured, n *node) {
 			n.isGlobal = true
 		}
 	}
+
+	_, n.blockingMove = obj.GetAnnotations()[clusterctlv1.BlockMoveAnnotation]
 }

 // getDiscoveryTypes returns the list of TypeMeta to be considered for the move discovery phase.
 // This list includes all the types defines by the CRDs installed by clusterctl and the ConfigMap/Secret core types.
-func (o *objectGraph) getDiscoveryTypes() error {
+func (o *objectGraph) getDiscoveryTypes(ctx context.Context) error {
 	crdList := &apiextensionsv1.CustomResourceDefinitionList{}
 	getDiscoveryTypesBackoff := newReadBackoff()
-	if err := retryWithExponentialBackoff(getDiscoveryTypesBackoff, func() error {
-		return getCRDList(o.proxy, crdList)
+	if err := retryWithExponentialBackoff(ctx, getDiscoveryTypesBackoff, func(ctx context.Context) error {
+		return getCRDList(ctx, o.proxy, crdList)
 	}); err != nil {
 		return err
 	}
@@ -385,8 +404,8 @@ func getKindAPIString(typeMeta metav1.TypeMeta) string {
 	return fmt.Sprintf("%ss.%s", strings.ToLower(typeMeta.Kind), api)
 }

-func getCRDList(proxy Proxy, crdList *apiextensionsv1.CustomResourceDefinitionList) error {
-	c, err := proxy.NewClient()
+func getCRDList(ctx context.Context, proxy Proxy, crdList *apiextensionsv1.CustomResourceDefinitionList) error {
+	c, err := proxy.NewClient(ctx)
 	if err != nil {
 		return err
 	}
@@ -399,7 +418,7 @@ func getCRDList(proxy Proxy, crdList *apiextensionsv1.CustomResourceDefinitionLi

 // Discovery reads all the Kubernetes objects existing in a namespace (or in all namespaces if empty) for the types received in input, and then adds
 // everything to the objects graph.
-func (o *objectGraph) Discovery(namespace string) error {
+func (o *objectGraph) Discovery(ctx context.Context, namespace string) error {
 	log := logf.Log
 	log.Info("Discovering Cluster API objects")
@@ -413,15 +432,15 @@ func (o *objectGraph) Discovery(namespace string) error {
 		typeMeta := discoveryType.typeMeta
 		objList := new(unstructured.UnstructuredList)

-		if err := retryWithExponentialBackoff(discoveryBackoff, func() error {
-			return getObjList(o.proxy, typeMeta, selectors, objList)
+		if err := retryWithExponentialBackoff(ctx, discoveryBackoff, func(ctx context.Context) error {
+			return getObjList(ctx, o.proxy, typeMeta, selectors, objList)
 		}); err != nil {
 			return err
 		}

 		// if we are discovering Secrets, also secrets from the providers namespace should be included.
- if discoveryType.typeMeta.GetObjectKind().GroupVersionKind().GroupKind() == corev1.SchemeGroupVersion.WithKind("SecretList").GroupKind() { - providers, err := o.providerInventory.List() + if discoveryType.typeMeta.GetObjectKind().GroupVersionKind().GroupKind() == corev1.SchemeGroupVersion.WithKind("Secret").GroupKind() { + providers, err := o.providerInventory.List(ctx) if err != nil { return err } @@ -429,8 +448,8 @@ func (o *objectGraph) Discovery(namespace string) error { if p.Type == string(clusterctlv1.InfrastructureProviderType) { providerNamespaceSelector := []client.ListOption{client.InNamespace(p.Namespace)} providerNamespaceSecretList := new(unstructured.UnstructuredList) - if err := retryWithExponentialBackoff(discoveryBackoff, func() error { - return getObjList(o.proxy, typeMeta, providerNamespaceSelector, providerNamespaceSecretList) + if err := retryWithExponentialBackoff(ctx, discoveryBackoff, func(ctx context.Context) error { + return getObjList(ctx, o.proxy, typeMeta, providerNamespaceSelector, providerNamespaceSecretList) }); err != nil { return err } @@ -443,7 +462,7 @@ func (o *objectGraph) Discovery(namespace string) error { continue } - log.V(5).Info(typeMeta.Kind, "Count", len(objList.Items)) + log.V(5).Info(typeMeta.Kind, "count", len(objList.Items)) for i := range objList.Items { obj := objList.Items[i] if err := o.addObj(&obj); err != nil { @@ -452,7 +471,7 @@ func (o *objectGraph) Discovery(namespace string) error { } } - log.V(1).Info("Total objects", "Count", len(o.uidToNode)) + log.V(1).Info("Total objects", "count", len(o.uidToNode)) // Completes the graph by searching for soft ownership relations such as secrets linked to the cluster // by a naming convention (without any explicit OwnerReference). @@ -464,8 +483,8 @@ func (o *objectGraph) Discovery(namespace string) error { return nil } -func getObjList(proxy Proxy, typeMeta metav1.TypeMeta, selectors []client.ListOption, objList *unstructured.UnstructuredList) error { - c, err := proxy.NewClient() +func getObjList(ctx context.Context, proxy Proxy, typeMeta metav1.TypeMeta, selectors []client.ListOption, objList *unstructured.UnstructuredList) error { + c, err := proxy.NewClient(ctx) if err != nil { return err } @@ -504,6 +523,17 @@ func (o *objectGraph) getClusterClasses() []*node { return clusterClasses } +// getClusterResourceSetBinding returns the list of ClusterResourceSetBinding existing in the object graph. +func (o *objectGraph) getClusterResourceSetBinding() []*node { + crs := []*node{} + for _, node := range o.uidToNode { + if node.identity.GroupVersionKind().GroupKind() == addonsv1.GroupVersion.WithKind("ClusterResourceSetBinding").GroupKind() { + crs = append(crs, node) + } + } + return crs +} + // getClusters returns the list of Secrets existing in the object graph. func (o *objectGraph) getSecrets() []*node { secrets := []*node{} @@ -588,7 +618,7 @@ func (o *objectGraph) setSoftOwnership() { // Cluster that uses a ClusterClass are soft owned by that ClusterClass. for _, clusterClass := range clusterClasses { for _, cluster := range clusters { - // if the cluster uses a managed topoloy and uses the clusterclass + // if the cluster uses a managed topology and uses the clusterclass // set the clusterclass as a soft owner of the cluster. 
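 			// For example (illustrative values): Cluster ns1/cluster1 with
 			// spec.topology.class: "class1" is matched to ClusterClass ns1/class1;
 			// the class name was stashed in additionalInfo[clusterTopologyNameKey]
 			// during discovery, and the namespaces must match.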
if className, ok := cluster.additionalInfo[clusterTopologyNameKey]; ok { if className == clusterClass.identity.Name && clusterClass.identity.Namespace == cluster.identity.Namespace { @@ -597,6 +627,21 @@ func (o *objectGraph) setSoftOwnership() { } } } + + crsBindings := o.getClusterResourceSetBinding() + // ClusterResourceSetBinding that refers to a Cluster are soft owned by that Cluster. + for _, binding := range crsBindings { + clusterName, ok := binding.additionalInfo[clusterResourceSetBindingClusterNameKey] + if !ok { + continue + } + + for _, cluster := range clusters { + if clusterName == cluster.identity.Name && binding.identity.Namespace == cluster.identity.Namespace { + binding.addSoftOwner(cluster) + } + } + } } // setTenants identifies all the nodes linked to a parent with forceMoveHierarchy = true (e.g. Clusters or ClusterResourceSet) diff --git a/cmd/clusterctl/client/cluster/objectgraph_test.go b/cmd/clusterctl/client/cluster/objectgraph_test.go index 4f9ee1d1f89e..258acda9d0eb 100644 --- a/cmd/clusterctl/client/cluster/objectgraph_test.go +++ b/cmd/clusterctl/client/cluster/objectgraph_test.go @@ -17,6 +17,7 @@ limitations under the License. package cluster import ( + "context" "fmt" "sort" "testing" @@ -221,14 +222,16 @@ func TestObjectGraph_getDiscoveryTypeMetaList(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + graph := newObjectGraph(tt.fields.proxy, nil) - err := graph.getDiscoveryTypes() + err := graph.getDiscoveryTypes(ctx) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(graph.types).To(Equal(tt.want)) }) } @@ -252,7 +255,7 @@ func assertGraph(t *testing.T, got *objectGraph, want wantGraph) { g := NewWithT(t) - g.Expect(got.uidToNode).To(HaveLen(len(want.nodes)), "the number of nodes in the objectGraph doesn't match the number of expected nodes") + g.Expect(got.uidToNode).To(HaveLen(len(want.nodes)), "the number of nodes in the objectGraph doesn't match the number of expected nodes - got: %d expected: %d", len(got.uidToNode), len(want.nodes)) for uid, wantNode := range want.nodes { gotNode, ok := got.uidToNode[types.UID(uid)] @@ -810,6 +813,83 @@ var objectGraphsTests = []struct { }, }, }, + { + name: "Cluster with MachineDeployment without a BootstrapConfigRef", + args: objectGraphTestArgs{ + objs: test.NewFakeCluster("ns1", "cluster1"). + WithMachineDeployments( + test.NewFakeMachineDeployment("md1"). + WithStaticBootstrapConfig(). + WithMachineSets( + test.NewFakeMachineSet("ms1"). + WithMachines( + test.NewFakeMachine("m1"), + ), + ), + ).Objs(), + }, + want: wantGraph{ + nodes: map[string]wantGraphItem{ + "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1": { + forceMove: true, + forceMoveHierarchy: true, + }, + "infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/cluster1": { + owners: []string{ + "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1", + }, + }, + "/v1, Kind=Secret, ns1/cluster1-ca": { + softOwners: []string{ + "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1", // NB. 
this secret is not linked to the cluster through owner ref + }, + }, + "/v1, Kind=Secret, ns1/cluster1-kubeconfig": { + owners: []string{ + "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1", + }, + }, + + "cluster.x-k8s.io/v1beta1, Kind=MachineDeployment, ns1/md1": { + owners: []string{ + "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1", + }, + }, + "infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachineTemplate, ns1/md1": { + owners: []string{ + "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1", + }, + }, + + "cluster.x-k8s.io/v1beta1, Kind=MachineSet, ns1/ms1": { + owners: []string{ + "cluster.x-k8s.io/v1beta1, Kind=MachineDeployment, ns1/md1", + }, + }, + + "cluster.x-k8s.io/v1beta1, Kind=Machine, ns1/m1": { + owners: []string{ + "cluster.x-k8s.io/v1beta1, Kind=MachineSet, ns1/ms1", + }, + }, + "infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachine, ns1/m1": { + owners: []string{ + "cluster.x-k8s.io/v1beta1, Kind=Machine, ns1/m1", + }, + }, + "bootstrap.cluster.x-k8s.io/v1beta1, Kind=GenericBootstrapConfig, ns1/m1": { + owners: []string{ + "cluster.x-k8s.io/v1beta1, Kind=Machine, ns1/m1", + }, + }, + "/v1, Kind=Secret, ns1/m1": { + owners: []string{ + "bootstrap.cluster.x-k8s.io/v1beta1, Kind=GenericBootstrapConfig, ns1/m1", + }, + }, + }, + }, + }, { name: "Cluster with Control Plane", args: objectGraphTestArgs{ @@ -1120,6 +1200,8 @@ var objectGraphsTests = []struct { "addons.cluster.x-k8s.io/v1beta1, Kind=ClusterResourceSetBinding, ns1/cluster1": { owners: []string{ "addons.cluster.x-k8s.io/v1beta1, Kind=ClusterResourceSet, ns1/crs1", + }, + softOwners: []string{ "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1", }, }, @@ -1201,12 +1283,16 @@ var objectGraphsTests = []struct { "addons.cluster.x-k8s.io/v1beta1, Kind=ClusterResourceSetBinding, ns1/cluster1": { owners: []string{ "addons.cluster.x-k8s.io/v1beta1, Kind=ClusterResourceSet, ns1/crs1", + }, + softOwners: []string{ "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1", }, }, "addons.cluster.x-k8s.io/v1beta1, Kind=ClusterResourceSetBinding, ns1/cluster2": { owners: []string{ "addons.cluster.x-k8s.io/v1beta1, Kind=ClusterResourceSet, ns1/crs1", + }, + softOwners: []string{ "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster2", }, }, @@ -1647,7 +1733,7 @@ func TestObjectGraph_addObj_WithFakeObjects(t *testing.T) { g := NewWithT(t) graph, err := getDetachedObjectGraphWihObjs(tt.args.objs) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) // call setSoftOwnership so there is functional parity with discovery graph.setSoftOwnership() @@ -1688,39 +1774,29 @@ func getFakeProxyWithCRDs() *test.FakeProxy { return proxy } -func getFakeDiscoveryTypes(graph *objectGraph) error { - if err := graph.getDiscoveryTypes(); err != nil { - return err - } - - // Given that the Fake client behaves in a different way than real client, for this test we are required to add the List suffix to all the types. - for _, discoveryType := range graph.types { - discoveryType.typeMeta.Kind = fmt.Sprintf("%sList", discoveryType.typeMeta.Kind) - } - return nil -} - func TestObjectGraph_Discovery(t *testing.T) { // NB. we are testing the graph is properly built starting from objects (TestGraphBuilder_addObj_WithFakeObjects) or from the same objects read from the cluster (this test). 
for _, tt := range objectGraphsTests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + // Create an objectGraph bound to a source cluster with all the CRDs for the types involved in the test. graph := getObjectGraphWithObjs(tt.args.objs) // Get all the types to be considered for discovery - err := getFakeDiscoveryTypes(graph) - g.Expect(err).NotTo(HaveOccurred()) + err := graph.getDiscoveryTypes(ctx) + g.Expect(err).ToNot(HaveOccurred()) // finally test discovery - err = graph.Discovery("") + err = graph.Discovery(ctx, "") if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) assertGraph(t, graph, tt.want) }) } @@ -1862,21 +1938,23 @@ func TestObjectGraph_DiscoveryByNamespace(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + // Create an objectGraph bound to a source cluster with all the CRDs for the types involved in the test. graph := getObjectGraphWithObjs(tt.args.objs) // Get all the types to be considered for discovery - err := getFakeDiscoveryTypes(graph) - g.Expect(err).NotTo(HaveOccurred()) + err := graph.getDiscoveryTypes(ctx) + g.Expect(err).ToNot(HaveOccurred()) // finally test discovery - err = graph.Discovery(tt.args.namespace) + err = graph.Discovery(ctx, tt.args.namespace) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) assertGraph(t, graph, tt.want) }) } @@ -1887,32 +1965,148 @@ func Test_objectGraph_setSoftOwnership(t *testing.T) { objs []client.Object } tests := []struct { - name string - fields fields - wantSecrets map[string][]string + name string + fields fields + want wantGraph }{ { name: "A cluster with a soft owned secret", fields: fields{ - objs: test.NewFakeCluster("ns1", "foo").Objs(), + objs: test.NewFakeCluster("ns1", "cluster1").Objs(), + }, + want: wantGraph{ + nodes: map[string]wantGraphItem{ + "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1": { + forceMove: true, + forceMoveHierarchy: true, + }, + "infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/cluster1": { + owners: []string{ + "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1", + }, + }, + "/v1, Kind=Secret, ns1/cluster1-ca": { // the ca secret has no explicit OwnerRef to the cluster, so it should be identified as a soft ownership + softOwners: []string{ + "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1", + }, + }, + "/v1, Kind=Secret, ns1/cluster1-kubeconfig": { // the kubeconfig secret has explicit OwnerRef to the cluster, so it should NOT be identified as a soft ownership + owners: []string{ + "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1", + }, + }, + }, }, - wantSecrets: map[string][]string{ // wantSecrets is a map[node UID] --> list of soft owner UIDs - "/v1, Kind=Secret, ns1/foo-ca": { // the ca secret has no explicit OwnerRef to the cluster, so it should be identified as a soft ownership - "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/foo", + }, + { + name: "A ClusterClass with a soft owned Cluster", + fields: fields{ + objs: func() []client.Object { + objs := test.NewFakeClusterClass("ns1", "class1").Objs() + objs = append(objs, test.NewFakeCluster("ns1", "cluster1").WithTopologyClass("class1").Objs()...) 
+ + return objs + }(), + }, + want: wantGraph{ + nodes: map[string]wantGraphItem{ + "cluster.x-k8s.io/v1beta1, Kind=ClusterClass, ns1/class1": { + forceMove: true, + forceMoveHierarchy: true, + }, + "infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureClusterTemplate, ns1/class1": { + owners: []string{ + "cluster.x-k8s.io/v1beta1, Kind=ClusterClass, ns1/class1", + }, + }, + "controlplane.cluster.x-k8s.io/v1beta1, Kind=GenericControlPlaneTemplate, ns1/class1": { + owners: []string{ + "cluster.x-k8s.io/v1beta1, Kind=ClusterClass, ns1/class1", + }, + }, + "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1": { + forceMove: true, + forceMoveHierarchy: true, + softOwners: []string{ + "cluster.x-k8s.io/v1beta1, Kind=ClusterClass, ns1/class1", // NB. this cluster is not linked to the clusterclass through owner ref, but it is detected as soft ownership + }, + }, + "infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/cluster1": { + owners: []string{ + "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1", + }, + }, + "/v1, Kind=Secret, ns1/cluster1-ca": { + softOwners: []string{ + "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1", // NB. this secret is not linked to the cluster through owner ref, but it is detected as soft ownership + }, + }, + "/v1, Kind=Secret, ns1/cluster1-kubeconfig": { + owners: []string{ + "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1", + }, + }, }, - "/v1, Kind=Secret, ns1/foo-kubeconfig": {}, // the kubeconfig secret has explicit OwnerRef to the cluster, so it should NOT be identified as a soft ownership }, }, { - name: "A cluster with a soft owned secret (cluster name with - in the middle)", + name: "A Cluster with a soft owned ClusterResourceSetBinding", fields: fields{ - objs: test.NewFakeCluster("ns1", "foo-bar").Objs(), + objs: func() []client.Object { + objs := test.NewFakeCluster("ns1", "cluster1").Objs() + objs = append(objs, test.NewFakeClusterResourceSet("ns1", "crs1"). + WithSecret("resource-s1"). + WithConfigMap("resource-c1"). + ApplyToCluster(test.SelectClusterObj(objs, "ns1", "cluster1")). + Objs()...) + + return objs + }(), }, - wantSecrets: map[string][]string{ // wantSecrets is a map[node UID] --> list of soft owner UIDs - "/v1, Kind=Secret, ns1/foo-bar-ca": { // the ca secret has no explicit OwnerRef to the cluster, so it should be identified as a soft ownership - "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/foo-bar", + want: wantGraph{ + nodes: map[string]wantGraphItem{ + "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1": { + forceMove: true, + forceMoveHierarchy: true, + }, + "infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/cluster1": { + owners: []string{ + "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1", + }, + }, + "/v1, Kind=Secret, ns1/cluster1-ca": { + softOwners: []string{ + "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1", // NB. 
this secret is not linked to the cluster through owner ref, but it is detected as soft ownership + }, + }, + "/v1, Kind=Secret, ns1/cluster1-kubeconfig": { + owners: []string{ + "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1", + }, + }, + "addons.cluster.x-k8s.io/v1beta1, Kind=ClusterResourceSet, ns1/crs1": { + forceMove: true, + forceMoveHierarchy: true, + }, + "addons.cluster.x-k8s.io/v1beta1, Kind=ClusterResourceSetBinding, ns1/cluster1": { + owners: []string{ + "addons.cluster.x-k8s.io/v1beta1, Kind=ClusterResourceSet, ns1/crs1", + }, + softOwners: []string{ + "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1", // NB. this ClusterResourceSetBinding is not linked to the cluster through owner ref, but it is detected as soft ownership + }, + }, + "/v1, Kind=Secret, ns1/resource-s1": { + owners: []string{ + "addons.cluster.x-k8s.io/v1beta1, Kind=ClusterResourceSet, ns1/crs1", + }, + }, + "/v1, Kind=ConfigMap, ns1/resource-c1": { + owners: []string{ + "addons.cluster.x-k8s.io/v1beta1, Kind=ClusterResourceSet, ns1/crs1", + }, + }, }, - "/v1, Kind=Secret, ns1/foo-bar-kubeconfig": {}, // the kubeconfig secret has explicit OwnerRef to the cluster, so it should NOT be identified as a soft ownership }, }, } @@ -1921,24 +2115,11 @@ func Test_objectGraph_setSoftOwnership(t *testing.T) { g := NewWithT(t) graph, err := getDetachedObjectGraphWihObjs(tt.fields.objs) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) graph.setSoftOwnership() - gotSecrets := graph.getSecrets() - g.Expect(gotSecrets).To(HaveLen(len(tt.wantSecrets))) - - for _, secret := range gotSecrets { - wantObjects, ok := tt.wantSecrets[string(secret.identity.UID)] - g.Expect(ok).To(BeTrue()) - - gotObjects := []string{} - for softOwners := range secret.softOwners { - gotObjects = append(gotObjects, string(softOwners.identity.UID)) - } - - g.Expect(gotObjects).To(ConsistOf(wantObjects)) - } + assertGraph(t, graph, tt.want) }) } } @@ -2137,7 +2318,7 @@ func Test_objectGraph_setClusterTenants(t *testing.T) { g := NewWithT(t) gb, err := getDetachedObjectGraphWihObjs(tt.fields.objs) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) // we want to check that soft dependent nodes are considered part of the cluster, so we make sure to call SetSoftDependants before SetClusterTenants gb.setSoftOwnership() @@ -2240,7 +2421,7 @@ func Test_objectGraph_setCRSTenants(t *testing.T) { g := NewWithT(t) gb, err := getDetachedObjectGraphWihObjs(tt.fields.objs) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) gb.setTenants() @@ -2300,7 +2481,7 @@ func Test_objectGraph_setGlobalIdentityTenants(t *testing.T) { g := NewWithT(t) gb, err := getDetachedObjectGraphWihObjs(tt.fields.objs) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) gb.setTenants() diff --git a/cmd/clusterctl/client/cluster/ownergraph.go b/cmd/clusterctl/client/cluster/ownergraph.go index fc12f915005e..2049e8743eac 100644 --- a/cmd/clusterctl/client/cluster/ownergraph.go +++ b/cmd/clusterctl/client/cluster/ownergraph.go @@ -17,9 +17,16 @@ limitations under the License. 
 package cluster
 import (
+ "context"
+ "strings"
+
 "github.com/pkg/errors"
 corev1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
 )
 // OwnerGraph contains a graph with all the objects considered by clusterctl move as nodes and the OwnerReference relationship
@@ -32,52 +39,123 @@ type OwnerGraphNode struct {
 Owners []metav1.OwnerReference
 }
-func nodeToOwnerRef(n *node, attributes ownerReferenceAttributes) metav1.OwnerReference {
- ref := metav1.OwnerReference{
- Name: n.identity.Name,
- APIVersion: n.identity.APIVersion,
- Kind: n.identity.Kind,
- UID: n.identity.UID,
- }
- if attributes.BlockOwnerDeletion != nil {
- ref.BlockOwnerDeletion = attributes.BlockOwnerDeletion
- }
- if attributes.Controller != nil {
- ref.Controller = attributes.Controller
+// GetOwnerGraphFilterFunction allows filtering the objects returned by GetOwnerGraph.
+// The function has to return true for objects which should be kept.
+// NOTE: this function signature is exposed to allow implementation of E2E tests; there is
+// no guarantee about the stability of this API.
+type GetOwnerGraphFilterFunction func(u unstructured.Unstructured) bool
+
+// FilterClusterObjectsWithNameFilter is used in e2e tests where the owner graph
+// gets queried to filter out cluster-wide objects which don't contain the given string s in their
+// object name. This avoids assertions on objects which are part of in-parallel
+// running tests like ExtensionConfig.
+// NOTE: this function signature is exposed to allow implementation of E2E tests; there is
+// no guarantee about the stability of this API.
+func FilterClusterObjectsWithNameFilter(s string) func(u unstructured.Unstructured) bool {
+ return func(u unstructured.Unstructured) bool {
+ // Ignore cluster-wide objects which don't have the clusterName in their object
+ // name to avoid asserting on cluster-wide objects which get created or deleted
+ // by tests which run in-parallel (e.g. ExtensionConfig).
+ if u.GetNamespace() == "" && !strings.Contains(u.GetName(), s) {
+ return false
+ }
+ return true
 }
- return ref
 }
 // GetOwnerGraph returns a graph with all the objects considered by clusterctl move as nodes and the OwnerReference relationship between those objects as edges.
 // NOTE: this data structure is exposed to allow implementation of E2E tests verifying that CAPI can properly rebuild its
 // own owner references; there is no guarantee about the stability of this API. Using this test with providers may require
 // a custom implementation of this function, or the OwnerGraph it returns.
-func GetOwnerGraph(namespace, kubeconfigPath string) (OwnerGraph, error) {
+func GetOwnerGraph(ctx context.Context, namespace, kubeconfigPath string, filterFn GetOwnerGraphFilterFunction) (OwnerGraph, error) {
 p := newProxy(Kubeconfig{Path: kubeconfigPath, Context: ""})
 invClient := newInventoryClient(p, nil)
 graph := newObjectGraph(p, invClient)
 // Gets all the types defined by the CRDs installed by clusterctl plus the ConfigMap/Secret core types.
- err := graph.getDiscoveryTypes()
+ err := graph.getDiscoveryTypes(ctx)
 if err != nil {
 return OwnerGraph{}, errors.Wrap(err, "failed to retrieve discovery types")
 }
- // Discovery the object graph for the selected types:
- // - Nodes are defined the Kubernetes objects (Clusters, Machines etc.) identified during the discovery process.
- // Edges are derived by the OwnerReferences between nodes.
- if err := graph.Discovery(namespace); err != nil {
- return OwnerGraph{}, errors.Wrap(err, "failed to discover the object graph")
+ // graph.Discovery cannot be used here as it will use the latest APIVersion for ownerReferences - not those
+ // present in the object `metadata.ownerReferences`.
+ owners, err := discoverOwnerGraph(ctx, namespace, graph, filterFn)
+ if err != nil {
+ return OwnerGraph{}, errors.Wrap(err, "failed to discover the owner graph")
+ }
+ return owners, nil
+}
+
+func discoverOwnerGraph(ctx context.Context, namespace string, o *objectGraph, filterFn GetOwnerGraphFilterFunction) (OwnerGraph, error) {
+ selectors := []client.ListOption{}
+ if namespace != "" {
+ selectors = append(selectors, client.InNamespace(namespace))
 }
- owners := OwnerGraph{}
- // Using getMoveNodes here ensures only objects that are part of the Cluster are added to the OwnerGraph.
- for _, v := range graph.getMoveNodes() {
- n := OwnerGraphNode{Object: v.identity, Owners: []metav1.OwnerReference{}}
- for owner, attributes := range v.owners {
- n.Owners = append(n.Owners, nodeToOwnerRef(owner, attributes))
+ ownerGraph := OwnerGraph{}
+
+ discoveryBackoff := newReadBackoff()
+ for _, discoveryType := range o.types {
+ typeMeta := discoveryType.typeMeta
+ objList := new(unstructured.UnstructuredList)
+
+ if err := retryWithExponentialBackoff(ctx, discoveryBackoff, func(ctx context.Context) error {
+ return getObjList(ctx, o.proxy, typeMeta, selectors, objList)
+ }); err != nil {
+ return nil, err
+ }
+
+ // If we are discovering Secrets, Secrets from the provider namespaces should be included as well.
+ if discoveryType.typeMeta.GetObjectKind().GroupVersionKind().GroupKind() == corev1.SchemeGroupVersion.WithKind("SecretList").GroupKind() {
+ providers, err := o.providerInventory.List(ctx)
+ if err != nil {
+ return nil, err
+ }
+ for _, p := range providers.Items {
+ if p.Type == string(clusterctlv1.InfrastructureProviderType) {
+ providerNamespaceSelector := []client.ListOption{client.InNamespace(p.Namespace)}
+ providerNamespaceSecretList := new(unstructured.UnstructuredList)
+ if err := retryWithExponentialBackoff(ctx, discoveryBackoff, func(ctx context.Context) error {
+ return getObjList(ctx, o.proxy, typeMeta, providerNamespaceSelector, providerNamespaceSecretList)
+ }); err != nil {
+ return nil, err
+ }
+ objList.Items = append(objList.Items, providerNamespaceSecretList.Items...)
+ }
+ }
+ }
+ for _, obj := range objList.Items {
+ // Exclude the objects via the filter function.
+ if filterFn != nil && !filterFn(obj) {
+ continue
+ }
+ // Exclude the kube-root-ca.crt ConfigMap from the owner graph.
+ if obj.GetKind() == "ConfigMap" && obj.GetName() == "kube-root-ca.crt" {
+ continue
+ }
+ // Exclude the default service account from the owner graph.
+ // This Secret is no longer generated by default in Kubernetes 1.24+.
+ // This is not a CAPI related Secret, so it can be ignored.
+ if obj.GetKind() == "Secret" && strings.Contains(obj.GetName(), "default-token") {
+ continue
+ }
+ ownerGraph = addNodeToOwnerGraph(ownerGraph, obj)
 }
- owners[string(v.identity.UID)] = n
 }
- return owners, nil
+ return ownerGraph, nil
+}
+
+func addNodeToOwnerGraph(graph OwnerGraph, obj unstructured.Unstructured) OwnerGraph {
+ // Add a node for obj to the graph, keyed by its UID, recording its ownerReferences as the edges.
+ graph[string(obj.GetUID())] = OwnerGraphNode{
+ Owners: obj.GetOwnerReferences(),
+ Object: corev1.ObjectReference{
+ APIVersion: obj.GetAPIVersion(),
+ Kind: obj.GetKind(),
+ Name: obj.GetName(),
+ Namespace: obj.GetNamespace(),
+ },
+ }
+ return graph
 }
diff --git a/cmd/clusterctl/client/cluster/proxy.go b/cmd/clusterctl/client/cluster/proxy.go
index 5c114beac458..0cc13b589af8 100644
--- a/cmd/clusterctl/client/cluster/proxy.go
+++ b/cmd/clusterctl/client/cluster/proxy.go
@@ -17,6 +17,7 @@ limitations under the License.
 package cluster
 import (
+ "context"
 "fmt"
 "os"
 "strconv"
@@ -57,10 +58,10 @@ type Proxy interface {
 ValidateKubernetesVersion() error
 // NewClient returns a new controller runtime Client object for working on the management cluster.
- NewClient() (client.Client, error)
+ NewClient(ctx context.Context) (client.Client, error)
 // CheckClusterAvailable checks if a cluster is available and reachable.
- CheckClusterAvailable() error
+ CheckClusterAvailable(ctx context.Context) error
 // ListResources lists namespaced and cluster-wide resources for a component matching the labels. Namespaced resources are only listed
 // in the given namespaces.
@@ -68,13 +69,13 @@ type Proxy interface {
 // Certificates for cert-manager, Clusters for CAPI, AWSCluster for CAPA and so on).
 // This is done to avoid errors when listing resources of providers which have already been deleted/scaled down to 0 replicas/with
 // malfunctioning webhooks.
- ListResources(labels map[string]string, namespaces ...string) ([]unstructured.Unstructured, error)
+ ListResources(ctx context.Context, labels map[string]string, namespaces ...string) ([]unstructured.Unstructured, error)
 // GetContexts returns the list of contexts in kubeconfig which begin with prefix.
 GetContexts(prefix string) ([]string, error)
 // GetResourceNames returns the list of resource names which begin with prefix.
- GetResourceNames(groupVersion, kind string, options []client.ListOption, prefix string) ([]string, error)
+ GetResourceNames(ctx context.Context, groupVersion, kind string, options []client.ListOption, prefix string) ([]string, error)
 }
 type proxy struct {
@@ -155,7 +156,7 @@ func (k *proxy) GetConfig() (*rest.Config, error) {
 return restConfig, nil
 }
-func (k *proxy) NewClient() (client.Client, error) {
+func (k *proxy) NewClient(ctx context.Context) (client.Client, error) {
 config, err := k.GetConfig()
 if err != nil {
 return nil, err
@@ -164,7 +165,7 @@ func (k *proxy) NewClient() (client.Client, error) {
 var c client.Client
 // Nb. The operation is wrapped in a retry loop to make newClientSet more resilient to temporary connection problems.
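All of the context plumbing in these hunks follows one pattern: retryWithExponentialBackoff now receives the caller's context and hands it to the retried function, so a retry loop can be abandoned as soon as the caller cancels instead of sleeping through the remaining backoff steps. A minimal sketch of that pattern follows; retryWithBackoff and its parameters here are illustrative, not clusterctl's actual helper.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retryWithBackoff retries op with exponential backoff, aborting early when
// the context is cancelled.
func retryWithBackoff(ctx context.Context, steps int, initial time.Duration, op func(ctx context.Context) error) error {
	delay := initial
	var lastErr error
	for i := 0; i < steps; i++ {
		if lastErr = op(ctx); lastErr == nil {
			return nil
		}
		select {
		case <-ctx.Done():
			// Give up immediately instead of sleeping out the remaining backoff.
			return ctx.Err()
		case <-time.After(delay):
			delay *= 2
		}
	}
	return fmt.Errorf("retries exhausted: %w", lastErr)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	attempts := 0
	err := retryWithBackoff(ctx, 5, 10*time.Millisecond, func(context.Context) error {
		attempts++
		if attempts < 3 {
			return errors.New("temporary connection problem")
		}
		return nil
	})
	fmt.Println(attempts, err) // 3 <nil>
}
```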
connectBackoff := newConnectBackoff() - if err := retryWithExponentialBackoff(connectBackoff, func() error { + if err := retryWithExponentialBackoff(ctx, connectBackoff, func(_ context.Context) error { var err error c, err = client.New(config, client.Options{Scheme: localScheme}) if err != nil { @@ -178,7 +179,7 @@ func (k *proxy) NewClient() (client.Client, error) { return c, nil } -func (k *proxy) CheckClusterAvailable() error { +func (k *proxy) CheckClusterAvailable(ctx context.Context) error { // Check if the cluster is available by creating a client to the cluster. // If creating the client times out and never established we assume that // the cluster does not exist or is not reachable. @@ -190,7 +191,7 @@ func (k *proxy) CheckClusterAvailable() error { } connectBackoff := newShortConnectBackoff() - return retryWithExponentialBackoff(connectBackoff, func() error { + return retryWithExponentialBackoff(ctx, connectBackoff, func(_ context.Context) error { _, err := client.New(config, client.Options{Scheme: localScheme}) return err }) @@ -209,13 +210,13 @@ func (k *proxy) CheckClusterAvailable() error { // - If we now want to delete e.g. the kubeadm bootstrap provider, we cannot list AWSClusterControllerIdentity resources // as the conversion would fail, because the AWS controller hosting the conversion webhook has already been deleted. // - Thus we exclude resources of other providers if we detect that ListResources is called to list resources of a provider. -func (k *proxy) ListResources(labels map[string]string, namespaces ...string) ([]unstructured.Unstructured, error) { - cs, err := k.newClientSet() +func (k *proxy) ListResources(ctx context.Context, labels map[string]string, namespaces ...string) ([]unstructured.Unstructured, error) { + cs, err := k.newClientSet(ctx) if err != nil { return nil, err } - c, err := k.NewClient() + c, err := k.NewClient(ctx) if err != nil { return nil, err } @@ -223,7 +224,7 @@ func (k *proxy) ListResources(labels map[string]string, namespaces ...string) ([ // Get all the API resources in the cluster. resourceListBackoff := newReadBackoff() var resourceList []*metav1.APIResourceList - if err := retryWithExponentialBackoff(resourceListBackoff, func() error { + if err := retryWithExponentialBackoff(ctx, resourceListBackoff, func(context.Context) error { resourceList, err = cs.Discovery().ServerPreferredResources() return err }); err != nil { @@ -235,7 +236,7 @@ func (k *proxy) ListResources(labels map[string]string, namespaces ...string) ([ crdsToExclude := sets.Set[string]{} crdList := &apiextensionsv1.CustomResourceDefinitionList{} - if err := retryWithExponentialBackoff(newReadBackoff(), func() error { + if err := retryWithExponentialBackoff(ctx, newReadBackoff(), func(ctx context.Context) error { return c.List(ctx, crdList) }); err != nil { return nil, errors.Wrap(err, "failed to list CRDs") @@ -282,14 +283,14 @@ func (k *proxy) ListResources(labels map[string]string, namespaces ...string) ([ // List all the object instances of this resourceKind with the given labels if resourceKind.Namespaced { for _, namespace := range namespaces { - objList, err := listObjByGVK(c, resourceGroup.GroupVersion, resourceKind.Kind, []client.ListOption{client.MatchingLabels(labels), client.InNamespace(namespace)}) + objList, err := listObjByGVK(ctx, c, resourceGroup.GroupVersion, resourceKind.Kind, []client.ListOption{client.MatchingLabels(labels), client.InNamespace(namespace)}) if err != nil { return nil, err } ret = append(ret, objList.Items...) 
} } else { - objList, err := listObjByGVK(c, resourceGroup.GroupVersion, resourceKind.Kind, []client.ListOption{client.MatchingLabels(labels)}) + objList, err := listObjByGVK(ctx, c, resourceGroup.GroupVersion, resourceKind.Kind, []client.ListOption{client.MatchingLabels(labels)}) if err != nil { return nil, err } @@ -318,13 +319,13 @@ func (k *proxy) GetContexts(prefix string) ([]string, error) { } // GetResourceNames returns the list of resource names which begin with prefix. -func (k *proxy) GetResourceNames(groupVersion, kind string, options []client.ListOption, prefix string) ([]string, error) { - client, err := k.NewClient() +func (k *proxy) GetResourceNames(ctx context.Context, groupVersion, kind string, options []client.ListOption, prefix string) ([]string, error) { + client, err := k.NewClient(ctx) if err != nil { return nil, err } - objList, err := listObjByGVK(client, groupVersion, kind, options) + objList, err := listObjByGVK(ctx, client, groupVersion, kind, options) if err != nil { return nil, err } @@ -341,13 +342,13 @@ func (k *proxy) GetResourceNames(groupVersion, kind string, options []client.Lis return comps, nil } -func listObjByGVK(c client.Client, groupVersion, kind string, options []client.ListOption) (*unstructured.UnstructuredList, error) { +func listObjByGVK(ctx context.Context, c client.Client, groupVersion, kind string, options []client.ListOption) (*unstructured.UnstructuredList, error) { objList := new(unstructured.UnstructuredList) objList.SetAPIVersion(groupVersion) objList.SetKind(kind) resourceListBackoff := newReadBackoff() - if err := retryWithExponentialBackoff(resourceListBackoff, func() error { + if err := retryWithExponentialBackoff(ctx, resourceListBackoff, func(ctx context.Context) error { return c.List(ctx, objList, options...) }); err != nil { return nil, errors.Wrapf(err, "failed to list objects for the %q GroupVersionKind", objList.GroupVersionKind()) @@ -392,7 +393,7 @@ func newProxy(kubeconfig Kubeconfig, opts ...ProxyOption) Proxy { return p } -func (k *proxy) newClientSet() (*kubernetes.Clientset, error) { +func (k *proxy) newClientSet(ctx context.Context) (*kubernetes.Clientset, error) { config, err := k.GetConfig() if err != nil { return nil, err @@ -401,7 +402,7 @@ func (k *proxy) newClientSet() (*kubernetes.Clientset, error) { var cs *kubernetes.Clientset // Nb. The operation is wrapped in a retry loop to make newClientSet more resilient to temporary connection problems. 
connectBackoff := newConnectBackoff() - if err := retryWithExponentialBackoff(connectBackoff, func() error { + if err := retryWithExponentialBackoff(ctx, connectBackoff, func(_ context.Context) error { var err error cs, err = kubernetes.NewForConfig(config) if err != nil { diff --git a/cmd/clusterctl/client/cluster/proxy_test.go b/cmd/clusterctl/client/cluster/proxy_test.go index dfa55f029512..94db7106d72b 100644 --- a/cmd/clusterctl/client/cluster/proxy_test.go +++ b/cmd/clusterctl/client/cluster/proxy_test.go @@ -64,7 +64,7 @@ func TestProxyGetConfig(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) dir, err := os.MkdirTemp("", "clusterctl") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) defer os.RemoveAll(dir) configFile := filepath.Join(dir, ".test-kubeconfig.yaml") g.Expect(os.WriteFile(configFile, []byte(tt.kubeconfigContents), 0600)).To(Succeed()) @@ -91,7 +91,7 @@ func TestProxyGetConfig(t *testing.T) { t.Run("configure timeout", func(t *testing.T) { g := NewWithT(t) dir, err := os.MkdirTemp("", "clusterctl") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) defer os.RemoveAll(dir) configFile := filepath.Join(dir, ".test-kubeconfig.yaml") g.Expect(os.WriteFile(configFile, []byte(kubeconfig("management", "default")), 0600)).To(Succeed()) @@ -118,7 +118,7 @@ func TestKUBECONFIGEnvVar(t *testing.T) { g := NewWithT(t) dir, err := os.MkdirTemp("", "clusterctl") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) defer os.RemoveAll(dir) configFile := filepath.Join(dir, ".test-kubeconfig.yaml") g.Expect(os.WriteFile(configFile, []byte(kubeconfigContents), 0600)).To(Succeed()) @@ -146,7 +146,7 @@ func TestKUBECONFIGEnvVar(t *testing.T) { ) g := NewWithT(t) dir, err := os.MkdirTemp("", "clusterctl") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) defer os.RemoveAll(dir) configFile := filepath.Join(dir, ".test-kubeconfig.yaml") g.Expect(os.WriteFile(configFile, []byte(kubeconfigContents), 0600)).To(Succeed()) @@ -223,7 +223,7 @@ func TestProxyCurrentNamespace(t *testing.T) { configFile = tt.kubeconfigPath } else { dir, err := os.MkdirTemp("", "clusterctl") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) defer os.RemoveAll(dir) configFile = filepath.Join(dir, ".test-kubeconfig.yaml") g.Expect(os.WriteFile(configFile, []byte(tt.kubeconfigContents), 0600)).To(Succeed()) diff --git a/cmd/clusterctl/client/cluster/template.go b/cmd/clusterctl/client/cluster/template.go index f6cffbf56f30..872fd4eed6c2 100644 --- a/cmd/clusterctl/client/cluster/template.go +++ b/cmd/clusterctl/client/cluster/template.go @@ -26,7 +26,7 @@ import ( "os" "strings" - "github.com/google/go-github/v48/github" + "github.com/google/go-github/v53/github" "github.com/pkg/errors" "golang.org/x/oauth2" corev1 "k8s.io/api/core/v1" @@ -40,17 +40,17 @@ import ( // TemplateClient has methods to work with templates stored in the cluster/out of the provider repository. type TemplateClient interface { // GetFromConfigMap returns a workload cluster template from the given ConfigMap. - GetFromConfigMap(namespace, name, dataKey, targetNamespace string, skipTemplateProcess bool) (repository.Template, error) + GetFromConfigMap(ctx context.Context, namespace, name, dataKey, targetNamespace string, skipTemplateProcess bool) (repository.Template, error) // GetFromURL returns a workload cluster template from the given URL. 
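getGitHubFileContent below resolves a template URL by splitting its path into owner, repo, link type, branch, and file path segments. Here is a self-contained sketch of that parsing for the blob case, assuming URLs of the shape https://github.com/{owner}/{repo}/blob/{branch}/{path}; the releases branch and the GitHub API calls are omitted.

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// parseGitHubBlobURL splits a github.com blob URL into its components.
func parseGitHubBlobURL(raw string) (owner, repo, branch, path string, err error) {
	u, err := url.Parse(raw)
	if err != nil {
		return "", "", "", "", err
	}
	parts := strings.Split(strings.TrimPrefix(u.Path, "/"), "/")
	// Expect at least owner/repo/blob/branch/file.
	if len(parts) < 5 || parts[2] != "blob" {
		return "", "", "", "", fmt.Errorf("unexpected GitHub URL format: %s", raw)
	}
	return parts[0], parts[1], parts[3], strings.Join(parts[4:], "/"), nil
}

func main() {
	owner, repo, branch, path, err := parseGitHubBlobURL(
		"https://github.com/kubernetes-sigs/cluster-api/blob/main/config/default/cluster-template.yaml")
	fmt.Println(owner, repo, branch, path, err)
	// kubernetes-sigs cluster-api main config/default/cluster-template.yaml <nil>
}
```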
- GetFromURL(templateURL, targetNamespace string, skipTemplateProcess bool) (repository.Template, error) + GetFromURL(ctx context.Context, templateURL, targetNamespace string, skipTemplateProcess bool) (repository.Template, error) } // templateClient implements TemplateClient. type templateClient struct { proxy Proxy configClient config.Client - gitHubClientFactory func(configVariablesClient config.VariablesClient) (*github.Client, error) + gitHubClientFactory func(ctx context.Context, configVariablesClient config.VariablesClient) (*github.Client, error) processor yaml.Processor httpClient *http.Client } @@ -76,7 +76,7 @@ func newTemplateClient(input TemplateClientInput) *templateClient { } } -func (t *templateClient) GetFromConfigMap(configMapNamespace, configMapName, configMapDataKey, targetNamespace string, skipTemplateProcess bool) (repository.Template, error) { +func (t *templateClient) GetFromConfigMap(ctx context.Context, configMapNamespace, configMapName, configMapDataKey, targetNamespace string, skipTemplateProcess bool) (repository.Template, error) { if configMapNamespace == "" { return nil, errors.New("invalid GetFromConfigMap operation: missing configMapNamespace value") } @@ -84,7 +84,7 @@ func (t *templateClient) GetFromConfigMap(configMapNamespace, configMapName, con return nil, errors.New("invalid GetFromConfigMap operation: missing configMapName value") } - c, err := t.proxy.NewClient() + c, err := t.proxy.NewClient(ctx) if err != nil { return nil, err } @@ -113,12 +113,12 @@ func (t *templateClient) GetFromConfigMap(configMapNamespace, configMapName, con }) } -func (t *templateClient) GetFromURL(templateURL, targetNamespace string, skipTemplateProcess bool) (repository.Template, error) { +func (t *templateClient) GetFromURL(ctx context.Context, templateURL, targetNamespace string, skipTemplateProcess bool) (repository.Template, error) { if templateURL == "" { return nil, errors.New("invalid GetFromURL operation: missing templateURL value") } - content, err := t.getURLContent(templateURL) + content, err := t.getURLContent(ctx, templateURL) if err != nil { return nil, errors.Wrapf(err, "invalid GetFromURL operation") } @@ -132,7 +132,7 @@ func (t *templateClient) GetFromURL(templateURL, targetNamespace string, skipTem }) } -func (t *templateClient) getURLContent(templateURL string) ([]byte, error) { +func (t *templateClient) getURLContent(ctx context.Context, templateURL string) ([]byte, error) { if templateURL == "-" { b, err := io.ReadAll(os.Stdin) if err != nil { @@ -148,9 +148,9 @@ func (t *templateClient) getURLContent(templateURL string) ([]byte, error) { if rURL.Scheme == "https" { if rURL.Host == "github.com" { - return t.getGitHubFileContent(rURL) + return t.getGitHubFileContent(ctx, rURL) } - return t.getRawURLFileContent(templateURL) + return t.getRawURLFileContent(ctx, templateURL) } if rURL.Scheme == "file" || rURL.Scheme == "" { @@ -176,7 +176,7 @@ func (t *templateClient) getLocalFileContent(rURL *url.URL) ([]byte, error) { return content, nil } -func (t *templateClient) getGitHubFileContent(rURL *url.URL) ([]byte, error) { +func (t *templateClient) getGitHubFileContent(ctx context.Context, rURL *url.URL) ([]byte, error) { // Check if the path is in the expected format, urlSplit := strings.Split(strings.TrimPrefix(rURL.Path, "/"), "/") if len(urlSplit) < 5 { @@ -193,7 +193,7 @@ func (t *templateClient) getGitHubFileContent(rURL *url.URL) ([]byte, error) { linkType := urlSplit[2] // gets the GitHub client - ghClient, err := 
t.gitHubClientFactory(t.configClient.Variables()) + ghClient, err := t.gitHubClientFactory(ctx, t.configClient.Variables()) if err != nil { return nil, err } @@ -204,7 +204,7 @@ func (t *templateClient) getGitHubFileContent(rURL *url.URL) ([]byte, error) { branch := urlSplit[3] path := strings.Join(urlSplit[4:], "/") - return getGithubFileContentFromCode(ghClient, rURL.Path, owner, repo, path, branch) + return getGithubFileContentFromCode(ctx, ghClient, rURL.Path, owner, repo, path, branch) case "releases": // get a github release asset if urlSplit[3] != "download" { @@ -213,13 +213,13 @@ func (t *templateClient) getGitHubFileContent(rURL *url.URL) ([]byte, error) { tag := urlSplit[4] assetName := urlSplit[5] - return getGithubAssetFromRelease(ghClient, rURL.Path, owner, repo, tag, assetName) + return getGithubAssetFromRelease(ctx, ghClient, rURL.Path, owner, repo, tag, assetName) } return nil, fmt.Errorf("unknown github URL: %v", rURL) } -func getGithubFileContentFromCode(ghClient *github.Client, fullPath string, owner string, repo string, path string, branch string) ([]byte, error) { +func getGithubFileContentFromCode(ctx context.Context, ghClient *github.Client, fullPath string, owner string, repo string, path string, branch string) ([]byte, error) { fileContent, _, _, err := ghClient.Repositories.GetContents(ctx, owner, repo, path, &github.RepositoryContentGetOptions{Ref: branch}) if err != nil { return nil, handleGithubErr(err, "failed to get %q", fullPath) @@ -237,7 +237,7 @@ func getGithubFileContentFromCode(ghClient *github.Client, fullPath string, owne return content, nil } -func (t *templateClient) getRawURLFileContent(rURL string) ([]byte, error) { +func (t *templateClient) getRawURLFileContent(ctx context.Context, rURL string) ([]byte, error) { request, err := http.NewRequestWithContext(ctx, http.MethodGet, rURL, http.NoBody) if err != nil { return nil, err @@ -261,7 +261,7 @@ func (t *templateClient) getRawURLFileContent(rURL string) ([]byte, error) { return content, nil } -func getGithubAssetFromRelease(ghClient *github.Client, path string, owner string, repo string, tag string, assetName string) ([]byte, error) { +func getGithubAssetFromRelease(ctx context.Context, ghClient *github.Client, path string, owner string, repo string, tag string, assetName string) ([]byte, error) { release, _, err := ghClient.Repositories.GetReleaseByTag(ctx, owner, repo, tag) if err != nil { return nil, handleGithubErr(err, "failed to get release '%s' from %s/%s repository", tag, owner, repo) @@ -291,13 +291,13 @@ func getGithubAssetFromRelease(ghClient *github.Client, path string, owner strin return io.ReadAll(rc) } -func getGitHubClient(configVariablesClient config.VariablesClient) (*github.Client, error) { +func getGitHubClient(ctx context.Context, configVariablesClient config.VariablesClient) (*github.Client, error) { var authenticatingHTTPClient *http.Client if token, err := configVariablesClient.Get(config.GitHubTokenVariable); err == nil { ts := oauth2.StaticTokenSource( &oauth2.Token{AccessToken: token}, ) - authenticatingHTTPClient = oauth2.NewClient(context.TODO(), ts) + authenticatingHTTPClient = oauth2.NewClient(ctx, ts) } return github.NewClient(authenticatingHTTPClient), nil diff --git a/cmd/clusterctl/client/cluster/template_test.go b/cmd/clusterctl/client/cluster/template_test.go index 7f32862e48ea..79c6a809711f 100644 --- a/cmd/clusterctl/client/cluster/template_test.go +++ b/cmd/clusterctl/client/cluster/template_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package cluster import ( + "context" "encoding/base64" "fmt" "net/http" @@ -26,7 +27,7 @@ import ( "path/filepath" "testing" - "github.com/google/go-github/v48/github" + "github.com/google/go-github/v53/github" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -46,8 +47,8 @@ kind: Machine` func Test_templateClient_GetFromConfigMap(t *testing.T) { g := NewWithT(t) - configClient, err := config.New("", config.InjectReader(test.NewFakeReader())) - g.Expect(err).NotTo(HaveOccurred()) + configClient, err := config.New(context.Background(), "", config.InjectReader(test.NewFakeReader())) + g.Expect(err).ToNot(HaveOccurred()) configMap := &corev1.ConfigMap{ TypeMeta: metav1.TypeMeta{ @@ -134,14 +135,16 @@ func Test_templateClient_GetFromConfigMap(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + processor := yaml.NewSimpleProcessor() tc := newTemplateClient(TemplateClientInput{tt.fields.proxy, tt.fields.configClient, processor}) - got, err := tc.GetFromConfigMap(tt.args.configMapNamespace, tt.args.configMapName, tt.args.configMapDataKey, tt.args.targetNamespace, tt.args.skipTemplateProcess) + got, err := tc.GetFromConfigMap(ctx, tt.args.configMapNamespace, tt.args.configMapName, tt.args.configMapDataKey, tt.args.targetNamespace, tt.args.skipTemplateProcess) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) wantTemplate, err := repository.NewTemplate(repository.TemplateInput{ RawArtifact: []byte(tt.want), @@ -150,7 +153,7 @@ func Test_templateClient_GetFromConfigMap(t *testing.T) { TargetNamespace: tt.args.targetNamespace, SkipTemplateProcess: tt.args.skipTemplateProcess, }) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(Equal(wantTemplate)) }) } @@ -162,10 +165,10 @@ func Test_templateClient_getGitHubFileContent(t *testing.T) { client, mux, teardown := test.NewFakeGitHub() defer teardown() - configClient, err := config.New("", config.InjectReader(test.NewFakeReader())) - g.Expect(err).NotTo(HaveOccurred()) + configClient, err := config.New(context.Background(), "", config.InjectReader(test.NewFakeReader())) + g.Expect(err).ToNot(HaveOccurred()) - mux.HandleFunc("/repos/kubernetes-sigs/cluster-api/contents/config/default/cluster-template.yaml", func(w http.ResponseWriter, r *http.Request) { + mux.HandleFunc("/repos/kubernetes-sigs/cluster-api/contents/config/default/cluster-template.yaml", func(w http.ResponseWriter, _ *http.Request) { fmt.Fprint(w, `{ "type": "file", "encoding": "base64", @@ -207,19 +210,21 @@ func Test_templateClient_getGitHubFileContent(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + c := &templateClient{ configClient: configClient, - gitHubClientFactory: func(configVariablesClient config.VariablesClient) (*github.Client, error) { + gitHubClientFactory: func(context.Context, config.VariablesClient) (*github.Client, error) { return client, nil }, } - got, err := c.getGitHubFileContent(tt.args.rURL) + got, err := c.getGitHubFileContent(ctx, tt.args.rURL) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(Equal(tt.want)) }) @@ -227,7 +232,7 @@ func Test_templateClient_getGitHubFileContent(t *testing.T) { } func Test_templateClient_getRawUrlFileContent(t *testing.T) { - fakeServer := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { fmt.Fprint(w, template) })) @@ -255,14 +260,16 @@ func Test_templateClient_getRawUrlFileContent(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + c := newTemplateClient(TemplateClientInput{}) - got, err := c.getRawURLFileContent(tt.args.rURL) + got, err := c.getRawURLFileContent(ctx, tt.args.rURL) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(Equal(tt.want)) }) @@ -273,7 +280,7 @@ func Test_templateClient_getLocalFileContent(t *testing.T) { g := NewWithT(t) tmpDir, err := os.MkdirTemp("", "cc") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) defer os.RemoveAll(tmpDir) path := filepath.Join(tmpDir, "cluster-template.yaml") @@ -316,7 +323,7 @@ func Test_templateClient_getLocalFileContent(t *testing.T) { return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(Equal(tt.want)) }) @@ -327,16 +334,16 @@ func Test_templateClient_GetFromURL(t *testing.T) { g := NewWithT(t) tmpDir, err := os.MkdirTemp("", "cc") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) defer os.RemoveAll(tmpDir) - configClient, err := config.New("", config.InjectReader(test.NewFakeReader())) - g.Expect(err).NotTo(HaveOccurred()) + configClient, err := config.New(context.Background(), "", config.InjectReader(test.NewFakeReader())) + g.Expect(err).ToNot(HaveOccurred()) fakeGithubClient, mux, teardown := test.NewFakeGitHub() defer teardown() - mux.HandleFunc("/repos/kubernetes-sigs/cluster-api/contents/config/default/cluster-template.yaml", func(w http.ResponseWriter, r *http.Request) { + mux.HandleFunc("/repos/kubernetes-sigs/cluster-api/contents/config/default/cluster-template.yaml", func(w http.ResponseWriter, _ *http.Request) { fmt.Fprint(w, `{ "type": "file", "encoding": "base64", @@ -348,7 +355,7 @@ func Test_templateClient_GetFromURL(t *testing.T) { }`) }) - mux.HandleFunc("/repos/some-owner/some-repo/releases/tags/v1.0.0", func(w http.ResponseWriter, r *http.Request) { + mux.HandleFunc("/repos/some-owner/some-repo/releases/tags/v1.0.0", func(w http.ResponseWriter, _ *http.Request) { fmt.Fprint(w, `{ "tag_name": "v1.0.0", "name": "v1.0.0", @@ -363,11 +370,11 @@ func Test_templateClient_GetFromURL(t *testing.T) { }`) }) - mux.HandleFunc("/repos/some-owner/some-repo/releases/assets/87654321", func(w http.ResponseWriter, r *http.Request) { + mux.HandleFunc("/repos/some-owner/some-repo/releases/assets/87654321", func(w http.ResponseWriter, _ *http.Request) { fmt.Fprint(w, template) }) - mux.HandleFunc("/repos/some-owner/some-repo/releases/tags/v2.0.0", func(w http.ResponseWriter, r *http.Request) { + mux.HandleFunc("/repos/some-owner/some-repo/releases/tags/v2.0.0", func(w http.ResponseWriter, _ *http.Request) { fmt.Fprint(w, `{ "tag_name": "v2.0.0", "name": "v2.0.0", @@ -383,14 +390,14 @@ func Test_templateClient_GetFromURL(t *testing.T) { }) // redirect asset - mux.HandleFunc("/repos/some-owner/some-repo/releases/assets/22222222", func(w http.ResponseWriter, r *http.Request) { + mux.HandleFunc("/repos/some-owner/some-repo/releases/assets/22222222", func(w http.ResponseWriter, _ *http.Request) { // add the "/api-v3" prefix to match the prefix of the fake github server w.Header().Add("Location", 
"/api-v3/redirected/22222222") w.WriteHeader(http.StatusFound) }) // redirect location - mux.HandleFunc("/redirected/22222222", func(w http.ResponseWriter, r *http.Request) { + mux.HandleFunc("/redirected/22222222", func(w http.ResponseWriter, _ *http.Request) { fmt.Fprint(w, template) }) @@ -401,7 +408,7 @@ func Test_templateClient_GetFromURL(t *testing.T) { saveStdin := os.Stdin defer func() { os.Stdin = saveStdin }() os.Stdin, err = os.Open(path) //nolint:gosec - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) type args struct { templateURL string @@ -479,7 +486,9 @@ func Test_templateClient_GetFromURL(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - gitHubClientFactory := func(configVariablesClient config.VariablesClient) (*github.Client, error) { + ctx := context.Background() + + gitHubClientFactory := func(context.Context, config.VariablesClient) (*github.Client, error) { return fakeGithubClient, nil } processor := yaml.NewSimpleProcessor() @@ -487,13 +496,13 @@ func Test_templateClient_GetFromURL(t *testing.T) { // override the github client factory c.gitHubClientFactory = gitHubClientFactory - got, err := c.GetFromURL(tt.args.templateURL, tt.args.targetNamespace, tt.args.skipTemplateProcess) + got, err := c.GetFromURL(ctx, tt.args.templateURL, tt.args.targetNamespace, tt.args.skipTemplateProcess) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) wantTemplate, err := repository.NewTemplate(repository.TemplateInput{ RawArtifact: []byte(tt.want), @@ -502,7 +511,7 @@ func Test_templateClient_GetFromURL(t *testing.T) { TargetNamespace: tt.args.targetNamespace, SkipTemplateProcess: tt.args.skipTemplateProcess, }) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(Equal(wantTemplate)) }) } diff --git a/cmd/clusterctl/client/cluster/topology.go b/cmd/clusterctl/client/cluster/topology.go index 0242f1262627..08e5d5d16471 100644 --- a/cmd/clusterctl/client/cluster/topology.go +++ b/cmd/clusterctl/client/cluster/topology.go @@ -55,7 +55,7 @@ const ( // TopologyClient has methods to work with ClusterClass and ManagedTopologies. type TopologyClient interface { - Plan(in *TopologyPlanInput) (*TopologyPlanOutput, error) + Plan(ctx context.Context, in *TopologyPlanInput) (*TopologyPlanOutput, error) } // topologyClient implements TopologyClient. @@ -106,8 +106,7 @@ type TopologyPlanOutput struct { // Plan performs a dry run execution of the topology reconciler using the given inputs. // It returns a summary of the changes observed during the execution. -func (t *topologyClient) Plan(in *TopologyPlanInput) (*TopologyPlanOutput, error) { - ctx := context.TODO() +func (t *topologyClient) Plan(ctx context.Context, in *TopologyPlanInput) (*TopologyPlanOutput, error) { log := logf.Log // Make sure the inputs are valid. @@ -121,9 +120,9 @@ func (t *topologyClient) Plan(in *TopologyPlanInput) (*TopologyPlanOutput, error // Example: This client will be used to fetch the underlying ClusterClass when the input // only has a Cluster object. 
var c client.Client - if err := t.proxy.CheckClusterAvailable(); err == nil { - if initialized, err := t.inventoryClient.CheckCAPIInstalled(); err == nil && initialized { - c, err = t.proxy.NewClient() + if err := t.proxy.CheckClusterAvailable(ctx); err == nil { + if initialized, err := t.inventoryClient.CheckCAPIInstalled(ctx); err == nil && initialized { + c, err = t.proxy.NewClient(ctx) if err != nil { return nil, errors.Wrap(err, "failed to create a client to the cluster") } @@ -264,7 +263,7 @@ func (t *topologyClient) validateInput(in *TopologyPlanInput) error { // - Prepare cluster objects so that the state of the cluster, if modified, correctly represents // the expected changes. func (t *topologyClient) prepareInput(ctx context.Context, in *TopologyPlanInput, apiReader client.Reader) error { - if err := t.setMissingNamespaces(in.TargetNamespace, in.Objs); err != nil { + if err := t.setMissingNamespaces(ctx, in.TargetNamespace, in.Objs); err != nil { return errors.Wrap(err, "failed to set missing namespaces") } @@ -276,12 +275,12 @@ func (t *topologyClient) prepareInput(ctx context.Context, in *TopologyPlanInput // setMissingNamespaces sets the object to the current namespace on objects // that are missing the namespace field. -func (t *topologyClient) setMissingNamespaces(currentNamespace string, objs []*unstructured.Unstructured) error { +func (t *topologyClient) setMissingNamespaces(ctx context.Context, currentNamespace string, objs []*unstructured.Unstructured) error { if currentNamespace == "" { // If TargetNamespace is not provided use "default" namespace. currentNamespace = metav1.NamespaceDefault // If a cluster is available use the current namespace as defined in its kubeconfig. - if err := t.proxy.CheckClusterAvailable(); err == nil { + if err := t.proxy.CheckClusterAvailable(ctx); err == nil { currentNamespace, err = t.proxy.CurrentNamespace() if err != nil { return errors.Wrap(err, "failed to get current namespace") @@ -481,7 +480,7 @@ func (t *topologyClient) reconcileClusterClasses(ctx context.Context, inputObjec // This is required as Clusters are validated based of variable definitions in the ClusterClass `.status.variables`. 
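The validator calls above switch to the two-value (warnings, error) return of controller-runtime's CustomValidator, and the dispatch stays the same: update validation when a current version of the object exists, create validation otherwise. A small runnable sketch of that dispatch; the object type and the validator here are illustrative stand-ins.

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

type object struct {
	name     string
	replicas int
}

// noNegativeReplicas mimics a webhook validator; the (warnings, error) return
// mirrors the CustomValidator shape the diff adapts to.
type noNegativeReplicas struct{}

func (noNegativeReplicas) ValidateCreate(_ context.Context, obj object) ([]string, error) {
	if obj.replicas < 0 {
		return nil, errors.New("replicas must be >= 0")
	}
	return nil, nil
}

func (noNegativeReplicas) ValidateUpdate(_ context.Context, _, newObj object) ([]string, error) {
	if newObj.replicas < 0 {
		return nil, errors.New("replicas must be >= 0")
	}
	return nil, nil
}

// validate dispatches like defaultAndValidateObjs: update validation when a
// current version of the object exists, create validation otherwise.
func validate(ctx context.Context, v noNegativeReplicas, oldObj *object, newObj object) error {
	var err error
	if oldObj != nil {
		_, err = v.ValidateUpdate(ctx, *oldObj, newObj)
	} else {
		_, err = v.ValidateCreate(ctx, newObj)
	}
	if err != nil {
		return fmt.Errorf("failed validation of %s: %w", newObj.name, err)
	}
	return nil
}

func main() {
	ctx := context.Background()
	v := noNegativeReplicas{}

	// Create path: no current version of the object exists.
	fmt.Println(validate(ctx, v, nil, object{name: "cluster1", replicas: 1}))

	// Update path: an existing object was found, validate the transition.
	old := object{name: "cluster1", replicas: 1}
	fmt.Println(validate(ctx, v, &old, object{name: "cluster1", replicas: -1}))
}
```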
reconciledClusterClasses := []client.Object{} for _, class := range allClusterClasses { - reconciledClusterClass, err := reconcileClusterClass(apiReader, class, reconciliationObjects) + reconciledClusterClass, err := reconcileClusterClass(ctx, apiReader, class, reconciliationObjects) if err != nil { return nil, errors.Wrapf(err, "ClusterClass %s could not be reconciled for dry run", class.GetName()) } @@ -507,7 +506,7 @@ func (t *topologyClient) reconcileClusterClasses(ctx context.Context, inputObjec return reconciledClusterClasses, nil } -func reconcileClusterClass(apiReader client.Reader, class client.Object, reconciliationObjects []client.Object) (*unstructured.Unstructured, error) { +func reconcileClusterClass(ctx context.Context, apiReader client.Reader, class client.Object, reconciliationObjects []client.Object) (*unstructured.Unstructured, error) { targetClusterClass := client.ObjectKey{Namespace: class.GetNamespace(), Name: class.GetName()} reconciliationObjects = append(reconciliationObjects, class) @@ -517,7 +516,6 @@ func reconcileClusterClass(apiReader client.Reader, class client.Object, reconci clusterClassReconciler := &clusterclasscontroller.Reconciler{ Client: reconcilerClient, - APIReader: reconcilerClient, UnstructuredCachingClient: reconcilerClient, } @@ -567,11 +565,11 @@ func (t *topologyClient) defaultAndValidateObjs(ctx context.Context, objs []*uns } } if oldObject != nil { - if err := validator.ValidateUpdate(ctx, oldObject, object); err != nil { + if _, err := validator.ValidateUpdate(ctx, oldObject, object); err != nil { return errors.Wrapf(err, "failed validation of %s %s/%s", obj.GroupVersionKind().String(), obj.GetNamespace(), obj.GetName()) } } else { - if err := validator.ValidateCreate(ctx, object); err != nil { + if _, err := validator.ValidateCreate(ctx, object); err != nil { return errors.Wrapf(err, "failed validation of %s %s/%s", obj.GroupVersionKind().String(), obj.GetNamespace(), obj.GetName()) } } @@ -803,6 +801,17 @@ func clusterClassUsesTemplate(cc *clusterv1.ClusterClass, templateRef *corev1.Ob } } + for _, mpClass := range cc.Spec.Workers.MachinePools { + // Check the bootstrap ref + if equalRef(mpClass.Template.Bootstrap.Ref, templateRef) { + return true + } + // Check the infrastructure ref. + if equalRef(mpClass.Template.Infrastructure.Ref, templateRef) { + return true + } + } + return false } diff --git a/cmd/clusterctl/client/cluster/topology_test.go b/cmd/clusterctl/client/cluster/topology_test.go index 7509a2bbbaa7..f9994ccad051 100644 --- a/cmd/clusterctl/client/cluster/topology_test.go +++ b/cmd/clusterctl/client/cluster/topology_test.go @@ -17,6 +17,7 @@ limitations under the License. package cluster import ( + "context" _ "embed" "fmt" "strings" @@ -51,10 +52,14 @@ var ( //go:embed assets/topology-test/modified-my-cluster.yaml modifiedMyClusterYAML []byte - // modifiedDockerMachineTemplateYAML adds metadat to the docker machine used by the control plane template.. + // modifiedDockerMachineTemplateYAML adds metadata to the docker machine used by the control plane template.. //go:embed assets/topology-test/modified-CP-dockermachinetemplate.yaml modifiedDockerMachineTemplateYAML []byte + // modifiedDockerMachinePoolTemplateYAML adds metadata to the docker machine pool used by the control plane template.. 
+ //go:embed assets/topology-test/modified-CP-dockermachinepooltemplate.yaml + modifiedDockerMachinePoolTemplateYAML []byte + //go:embed assets/topology-test/objects-in-different-namespaces.yaml objsInDifferentNamespacesYAML []byte ) @@ -95,12 +100,16 @@ func Test_topologyClient_Plan(t *testing.T) { {kind: "DockerCluster", namespace: "default", namePrefix: "my-cluster-"}, {kind: "DockerMachineTemplate", namespace: "default", namePrefix: "my-cluster-md-0-"}, {kind: "DockerMachineTemplate", namespace: "default", namePrefix: "my-cluster-md-1-"}, - {kind: "DockerMachineTemplate", namespace: "default", namePrefix: "my-cluster-control-plane-"}, - {kind: "KubeadmConfigTemplate", namespace: "default", namePrefix: "my-cluster-md-0-bootstrap-"}, - {kind: "KubeadmConfigTemplate", namespace: "default", namePrefix: "my-cluster-md-1-bootstrap-"}, + {kind: "DockerMachineTemplate", namespace: "default", namePrefix: "my-cluster-"}, + {kind: "DockerMachinePool", namespace: "default", namePrefix: "my-cluster-mp-0-"}, + {kind: "DockerMachinePool", namespace: "default", namePrefix: "my-cluster-mp-1-"}, + {kind: "KubeadmConfigTemplate", namespace: "default", namePrefix: "my-cluster-md-0-"}, + {kind: "KubeadmConfigTemplate", namespace: "default", namePrefix: "my-cluster-md-1-"}, {kind: "KubeadmControlPlane", namespace: "default", namePrefix: "my-cluster-"}, {kind: "MachineDeployment", namespace: "default", namePrefix: "my-cluster-md-0-"}, {kind: "MachineDeployment", namespace: "default", namePrefix: "my-cluster-md-1-"}, + {kind: "MachinePool", namespace: "default", namePrefix: "my-cluster-mp-0-"}, + {kind: "MachinePool", namespace: "default", namePrefix: "my-cluster-mp-1-"}, }, modified: []item{ {kind: "Cluster", namespace: "default", namePrefix: "my-cluster"}, @@ -169,7 +178,7 @@ func Test_topologyClient_Plan(t *testing.T) { created: []item{ // Modifying the DockerClusterTemplate will result in template rotation. A new template will be created // and used by KCP. - {kind: "DockerMachineTemplate", namespace: "default", namePrefix: "my-cluster-control-plane-"}, + {kind: "DockerMachineTemplate", namespace: "default", namePrefix: "my-cluster-"}, }, reconciledCluster: &client.ObjectKey{Namespace: "default", Name: "my-cluster"}, }, @@ -204,6 +213,35 @@ func Test_topologyClient_Plan(t *testing.T) { }, wantErr: false, }, + { + name: "Modifying an existing DockerMachinePoolTemplate. Affects multiple clusters. Target Cluster not specified.", + existingObjects: mustToUnstructured( + mockCRDsYAML, + existingMyClusterClassYAML, + existingMyClusterYAML, + existingMySecondClusterYAML, + ), + args: args{ + in: &TopologyPlanInput{ + Objs: mustToUnstructured(modifiedDockerMachinePoolTemplateYAML), + }, + }, + want: out{ + affectedClusters: func() []client.ObjectKey { + cluster := client.ObjectKey{Namespace: "default", Name: "my-cluster"} + cluster2 := client.ObjectKey{Namespace: "default", Name: "my-second-cluster"} + return []client.ObjectKey{cluster, cluster2} + }(), + affectedClusterClasses: func() []client.ObjectKey { + cc := client.ObjectKey{Namespace: "default", Name: "my-cluster-class"} + return []client.ObjectKey{cc} + }(), + modified: []item{}, + created: []item{}, + reconciledCluster: nil, + }, + wantErr: false, + }, { name: "Modifying an existing DockerMachineTemplate. Affects multiple clusters. Target Cluster specified.", existingObjects: mustToUnstructured( @@ -234,7 +272,38 @@ func Test_topologyClient_Plan(t *testing.T) { created: []item{ // Modifying the DockerClusterTemplate will result in template rotation. 
A new template will be created // and used by KCP. - {kind: "DockerMachineTemplate", namespace: "default", namePrefix: "my-cluster-control-plane-"}, + {kind: "DockerMachineTemplate", namespace: "default", namePrefix: "my-cluster-"}, + }, + reconciledCluster: &client.ObjectKey{Namespace: "default", Name: "my-cluster"}, + }, + wantErr: false, + }, + { + name: "Modifying an existing DockerMachinePoolTemplate. Affects multiple clusters. Target Cluster specified.", + existingObjects: mustToUnstructured( + mockCRDsYAML, + existingMyClusterClassYAML, + existingMyClusterYAML, + existingMySecondClusterYAML, + ), + args: args{ + in: &TopologyPlanInput{ + Objs: mustToUnstructured(modifiedDockerMachinePoolTemplateYAML), + TargetClusterName: "my-cluster", + }, + }, + want: out{ + affectedClusters: func() []client.ObjectKey { + cluster := client.ObjectKey{Namespace: "default", Name: "my-cluster"} + cluster2 := client.ObjectKey{Namespace: "default", Name: "my-second-cluster"} + return []client.ObjectKey{cluster, cluster2} + }(), + affectedClusterClasses: func() []client.ObjectKey { + cc := client.ObjectKey{Namespace: "default", Name: "my-cluster-class"} + return []client.ObjectKey{cc} + }(), + created: []item{ + {kind: "DockerMachinePool", namespace: "default", namePrefix: "my-cluster-"}, }, reconciledCluster: &client.ObjectKey{Namespace: "default", Name: "my-cluster"}, }, @@ -264,6 +333,8 @@ func Test_topologyClient_Plan(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + existingObjects := []client.Object{} for _, o := range tt.existingObjects { existingObjects = append(existingObjects, o) @@ -275,13 +346,13 @@ func Test_topologyClient_Plan(t *testing.T) { inventoryClient, ) - res, err := tc.Plan(tt.args.in) + res, err := tc.Plan(ctx, tt.args.in) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } // The plan should function should not return any error. - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) // Check affected ClusterClasses. g.Expect(res.ClusterClasses).To(HaveLen(len(tt.want.affectedClusterClasses))) @@ -300,7 +371,7 @@ func Test_topologyClient_Plan(t *testing.T) { g.Expect(res.ReconciledCluster).To(BeNil()) } else { g.Expect(res.ReconciledCluster).NotTo(BeNil()) - g.Expect(*res.ReconciledCluster).To(Equal(*tt.want.reconciledCluster)) + g.Expect(*res.ReconciledCluster).To(BeComparableTo(*tt.want.reconciledCluster)) } // Check the created objects. diff --git a/cmd/clusterctl/client/cluster/upgrader.go b/cmd/clusterctl/client/cluster/upgrader.go index 43400ec59116..7e188564524b 100644 --- a/cmd/clusterctl/client/cluster/upgrader.go +++ b/cmd/clusterctl/client/cluster/upgrader.go @@ -26,7 +26,8 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/utils/pointer" + "k8s.io/klog/v2" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -38,16 +39,14 @@ import ( // ProviderUpgrader defines methods for supporting provider upgrade. type ProviderUpgrader interface { - // Plan returns a set of suggested Upgrade plans for the management cluster, and more specifically: - // - Upgrade to the latest version in the v1alpha3 series: .... - // - Upgrade to the latest version in the v1alpha4 series: .... - Plan() ([]UpgradePlan, error) + // Plan returns a set of suggested Upgrade plans for the management cluster. 
+ Plan(ctx context.Context) ([]UpgradePlan, error) // ApplyPlan executes an upgrade following an UpgradePlan generated by clusterctl. - ApplyPlan(opts UpgradeOptions, clusterAPIVersion string) error + ApplyPlan(ctx context.Context, opts UpgradeOptions, clusterAPIVersion string) error // ApplyCustomPlan executes an upgrade using the UpgradeItems provided by the user. - ApplyCustomPlan(opts UpgradeOptions, providersToUpgrade ...UpgradeItem) error + ApplyCustomPlan(ctx context.Context, opts UpgradeOptions, providersToUpgrade ...UpgradeItem) error } // UpgradePlan defines a list of possible upgrade targets for a management cluster. @@ -93,11 +92,11 @@ type providerUpgrader struct { var _ ProviderUpgrader = &providerUpgrader{} -func (u *providerUpgrader) Plan() ([]UpgradePlan, error) { +func (u *providerUpgrader) Plan(ctx context.Context) ([]UpgradePlan, error) { log := logf.Log log.Info("Checking new release availability...") - providerList, err := u.providerInventory.List() + providerList, err := u.providerInventory.List(ctx) if err != nil { return nil, err } @@ -108,8 +107,6 @@ func (u *providerUpgrader) Plan() ([]UpgradePlan, error) { // all the providers in the management cluster can upgrade to the latest release supporting v1alpha4, or if available, // all the providers can upgrade to the latest release supporting v1alpha5 (not supported in current clusterctl release, // but upgrade plan should report these options) - // Please note that upgrade plan also works on management cluster still in v1alpha3. In this case upgrade plan is shown, but - // upgrade to latest version in the v1alpha3 series are not supported using clusterctl v1alpha4 (use older releases). // Gets the upgrade info for the core provider. coreProviders := providerList.FilterCore() @@ -118,7 +115,7 @@ } coreProvider := coreProviders[0] - coreUpgradeInfo, err := u.getUpgradeInfo(coreProvider) + coreUpgradeInfo, err := u.getUpgradeInfo(ctx, coreProvider) if err != nil { return nil, err } @@ -132,12 +129,11 @@ // Creates an UpgradePlan for each contract considered for upgrades; each upgrade plan contains // an UpgradeItem for each provider defining the next available version with the target contract, if available. - // e.g. v1alpha3, cluster-api --> v0.3.2, kubeadm bootstrap --> v0.3.2, aws --> v0.5.4 (not supported in current clusterctl release, but upgrade plan should report these options). // e.g. v1alpha4, cluster-api --> v0.4.1, kubeadm bootstrap --> v0.4.1, aws --> v0.X.2 // e.g. v1alpha4, cluster-api --> v0.5.1, kubeadm bootstrap --> v0.5.1, aws --> v0.Y.4 (not supported in current clusterctl release, but upgrade plan should report these options).
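As an aside, the comments above can be made concrete with a small sketch. This is not the clusterctl implementation; the types are simplified stand-ins and the latest-version lookup is left abstract, but it shows how one UpgradePlan per upgradable contract is assembled, with an empty NextVersion marking providers that have no release for that contract:

package main

import "fmt"

// Simplified stand-ins for the clusterctl types (hypothetical).
type UpgradeItem struct {
    ProviderName string
    NextVersion  string // "" means no release available for the contract
}

type UpgradePlan struct {
    Contract  string
    Providers []UpgradeItem
}

// buildPlans creates one plan per contract, asking the lookup function for the
// newest version each provider publishes for that contract.
func buildPlans(contracts, providers []string, latest func(provider, contract string) string) []UpgradePlan {
    plans := make([]UpgradePlan, 0, len(contracts))
    for _, contract := range contracts {
        plan := UpgradePlan{Contract: contract}
        for _, p := range providers {
            plan.Providers = append(plan.Providers, UpgradeItem{ProviderName: p, NextVersion: latest(p, contract)})
        }
        plans = append(plans, plan)
    }
    return plans
}

func main() {
    latest := func(provider, contract string) string {
        if contract == "v1beta1" && provider == "cluster-api" {
            return "v1.5.0"
        }
        return "" // no release for this contract
    }
    fmt.Println(buildPlans([]string{"v1beta1"}, []string{"cluster-api", "infra"}, latest))
}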
ret := make([]UpgradePlan, 0) for _, contract := range contractsForUpgrade { - upgradePlan, err := u.getUpgradePlan(providerList.Items, contract) + upgradePlan, err := u.getUpgradePlan(ctx, providerList.Items, contract) if err != nil { return nil, err } @@ -155,7 +151,7 @@ func (u *providerUpgrader) Plan() ([]UpgradePlan, error) { return ret, nil } -func (u *providerUpgrader) ApplyPlan(opts UpgradeOptions, contract string) error { +func (u *providerUpgrader) ApplyPlan(ctx context.Context, opts UpgradeOptions, contract string) error { if contract != clusterv1.GroupVersion.Version { return errors.Errorf("current version of clusterctl could only upgrade to %s contract, requested %s", clusterv1.GroupVersion.Version, contract) } @@ -164,42 +160,42 @@ func (u *providerUpgrader) ApplyPlan(opts UpgradeOptions, contract string) error log.Info("Performing upgrade...") // Gets the upgrade plan for the selected API Version of Cluster API (contract). - providerList, err := u.providerInventory.List() + providerList, err := u.providerInventory.List(ctx) if err != nil { return err } - upgradePlan, err := u.getUpgradePlan(providerList.Items, contract) + upgradePlan, err := u.getUpgradePlan(ctx, providerList.Items, contract) if err != nil { return err } // Do the upgrade - return u.doUpgrade(upgradePlan, opts) + return u.doUpgrade(ctx, upgradePlan, opts) } -func (u *providerUpgrader) ApplyCustomPlan(opts UpgradeOptions, upgradeItems ...UpgradeItem) error { +func (u *providerUpgrader) ApplyCustomPlan(ctx context.Context, opts UpgradeOptions, upgradeItems ...UpgradeItem) error { log := logf.Log log.Info("Performing upgrade...") // Create a custom upgrade plan from the upgrade items, taking care of ensuring all the providers in a management // cluster are consistent with the API Version of Cluster API (contract). - upgradePlan, err := u.createCustomPlan(upgradeItems) + upgradePlan, err := u.createCustomPlan(ctx, upgradeItems) if err != nil { return err } // Do the upgrade - return u.doUpgrade(upgradePlan, opts) + return u.doUpgrade(ctx, upgradePlan, opts) } // getUpgradePlan returns the upgrade plan for a specific set of providers/contract // NB. this function is used both for upgrade plan and upgrade apply. -func (u *providerUpgrader) getUpgradePlan(providers []clusterctlv1.Provider, contract string) (*UpgradePlan, error) { +func (u *providerUpgrader) getUpgradePlan(ctx context.Context, providers []clusterctlv1.Provider, contract string) (*UpgradePlan, error) { upgradeItems := []UpgradeItem{} for _, provider := range providers { // Gets the upgrade info for the provider. - providerUpgradeInfo, err := u.getUpgradeInfo(provider) + providerUpgradeInfo, err := u.getUpgradeInfo(ctx, provider) if err != nil { return nil, err } @@ -222,14 +218,14 @@ func (u *providerUpgrader) getUpgradePlan(providers []clusterctlv1.Provider, con // createCustomPlan creates a custom upgrade plan from a set of upgrade items, taking care of ensuring all the providers // in a management cluster are consistent with the API Version of Cluster API (contract). -func (u *providerUpgrader) createCustomPlan(upgradeItems []UpgradeItem) (*UpgradePlan, error) { +func (u *providerUpgrader) createCustomPlan(ctx context.Context, upgradeItems []UpgradeItem) (*UpgradePlan, error) { // Gets the API Version of Cluster API (contract). // This is required to ensure all the providers in a management cluster are consistent with the contract supported by the core provider.
- // e.g if the core provider is v1alpha3, all the provider should be v1alpha3 as well. + // e.g. if the core provider is v1beta1, all the providers should be v1beta1 as well. // The target contract is derived from the current version of the core provider, or, if the core provider is included in the upgrade list, // from its target version. - providerList, err := u.providerInventory.List() + providerList, err := u.providerInventory.List(ctx) if err != nil { return nil, err } @@ -247,7 +243,7 @@ func (u *providerUpgrader) createCustomPlan(upgradeItems []UpgradeItem) (*Upgrad } } - targetContract, err := u.getProviderContractByVersion(coreProvider, targetCoreProviderVersion) + targetContract, err := u.getProviderContractByVersion(ctx, coreProvider, targetCoreProviderVersion) if err != nil { return nil, err } @@ -276,7 +272,7 @@ func (u *providerUpgrader) createCustomPlan(upgradeItems []UpgradeItem) (*Upgrad } // Retrieves the contract that is supported by the target version of the provider. - contract, err := u.getProviderContractByVersion(*provider, upgradeItem.NextVersion) + contract, err := u.getProviderContractByVersion(ctx, *provider, upgradeItem.NextVersion) if err != nil { return nil, err } @@ -297,7 +293,7 @@ func (u *providerUpgrader) createCustomPlan(upgradeItems []UpgradeItem) (*Upgrad } // Retrieves the contract that is supported by the current version of the provider. - contract, err := u.getProviderContractByVersion(provider, provider.Version) + contract, err := u.getProviderContractByVersion(ctx, provider, provider.Version) if err != nil { return nil, err } @@ -310,14 +306,14 @@ func (u *providerUpgrader) createCustomPlan(upgradeItems []UpgradeItem) (*Upgrad } // getProviderContractByVersion returns the contract that a provider will support if updated to the given target version. -func (u *providerUpgrader) getProviderContractByVersion(provider clusterctlv1.Provider, targetVersion string) (string, error) { +func (u *providerUpgrader) getProviderContractByVersion(ctx context.Context, provider clusterctlv1.Provider, targetVersion string) (string, error) { targetSemVersion, err := version.ParseSemantic(targetVersion) if err != nil { return "", errors.Wrapf(err, "failed to parse target version for the %s provider", provider.InstanceName()) } // Gets the metadata for the core Provider - upgradeInfo, err := u.getUpgradeInfo(provider) + upgradeInfo, err := u.getUpgradeInfo(ctx, provider) if err != nil { return "", err } @@ -330,13 +326,13 @@ func (u *providerUpgrader) getProviderContractByVersion(provider clusterctlv1.Pr } // getUpgradeComponents returns the provider components for the selected target version.
-func (u *providerUpgrader) getUpgradeComponents(provider UpgradeItem) (repository.Components, error) { +func (u *providerUpgrader) getUpgradeComponents(ctx context.Context, provider UpgradeItem) (repository.Components, error) { configRepository, err := u.configClient.Providers().Get(provider.ProviderName, provider.GetProviderType()) if err != nil { return nil, err } - providerRepository, err := u.repositoryClientFactory(configRepository, u.configClient) + providerRepository, err := u.repositoryClientFactory(ctx, configRepository, u.configClient) if err != nil { return nil, err } @@ -345,17 +341,18 @@ func (u *providerUpgrader) getUpgradeComponents(provider UpgradeItem) (repositor Version: provider.NextVersion, TargetNamespace: provider.Namespace, } - components, err := providerRepository.Components().Get(options) + components, err := providerRepository.Components().Get(ctx, options) if err != nil { return nil, err } return components, nil } -func (u *providerUpgrader) doUpgrade(upgradePlan *UpgradePlan, opts UpgradeOptions) error { +func (u *providerUpgrader) doUpgrade(ctx context.Context, upgradePlan *UpgradePlan, opts UpgradeOptions) error { // Check for multiple instances of the same provider if current contract is v1alpha3. + // TODO(killianmuldoon) Assess if we can remove this piece of code. if upgradePlan.Contract == clusterv1.GroupVersion.Version { - if err := u.providerInventory.CheckSingleProviderInstance(); err != nil { + if err := u.providerInventory.CheckSingleProviderInstance(ctx); err != nil { return err } } @@ -376,17 +373,17 @@ func (u *providerUpgrader) doUpgrade(upgradePlan *UpgradePlan, opts UpgradeOptio } // Gets the provider components for the target version. - components, err := u.getUpgradeComponents(upgradeItem) + components, err := u.getUpgradeComponents(ctx, upgradeItem) if err != nil { return err } - c, err := u.proxy.NewClient() + c, err := u.proxy.NewClient(ctx) if err != nil { return err } - if err := newCRDMigrator(c).Run(ctx, components.Objs()); err != nil { + if err := NewCRDMigrator(c).Run(ctx, components.Objs()); err != nil { return err } } @@ -406,7 +403,7 @@ func (u *providerUpgrader) doUpgrade(upgradePlan *UpgradePlan, opts UpgradeOptio } // Scale down provider. - if err := u.scaleDownProvider(upgradeItem.Provider); err != nil { + if err := u.scaleDownProvider(ctx, upgradeItem.Provider); err != nil { return err } } @@ -421,7 +418,7 @@ func (u *providerUpgrader) doUpgrade(upgradePlan *UpgradePlan, opts UpgradeOptio } // Gets the provider components for the target version. - components, err := u.getUpgradeComponents(upgradeItem) + components, err := u.getUpgradeComponents(ctx, upgradeItem) if err != nil { return err } @@ -429,7 +426,7 @@ func (u *providerUpgrader) doUpgrade(upgradePlan *UpgradePlan, opts UpgradeOptio installQueue = append(installQueue, components) // Delete the provider, preserving CRD, namespace and the inventory. - if err := u.providerComponents.Delete(DeleteOptions{ + if err := u.providerComponents.Delete(ctx, DeleteOptions{ Provider: upgradeItem.Provider, IncludeNamespace: false, IncludeCRDs: false, @@ -439,26 +436,26 @@ func (u *providerUpgrader) doUpgrade(upgradePlan *UpgradePlan, opts UpgradeOptio } // Install the new version of the provider components. 
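Stepping back from the hunk for a moment: doUpgrade now threads ctx through a fixed sequence — fetch the target components, migrate CRDs, scale down the old controllers, delete each provider while preserving CRDs, namespace and inventory, reinstall, then wait for readiness. A condensed, hypothetical sketch of that ordering (the interface below is invented purely for illustration, it is not the clusterctl API):

package upgrade

import "context"

// Components stands in for the rendered provider manifests (hypothetical).
type Components any

// UpgradeItem mirrors the idea of one provider to upgrade (hypothetical).
type UpgradeItem struct{ Name, NextVersion string }

// upgradeDeps abstracts the collaborators used by the flow (invented for this sketch).
type upgradeDeps interface {
    Components(ctx context.Context, it UpgradeItem) (Components, error)
    ScaleDown(ctx context.Context, it UpgradeItem) error
    // Delete removes the old controllers but preserves CRDs, namespace and inventory.
    Delete(ctx context.Context, it UpgradeItem) error
    Install(ctx context.Context, c Components) error
    WaitReady(ctx context.Context, installed []Components) error
}

func upgradeProviders(ctx context.Context, items []UpgradeItem, deps upgradeDeps) error {
    // Phase 1: stop every old controller first so two versions never reconcile at once.
    for _, it := range items {
        if err := deps.ScaleDown(ctx, it); err != nil {
            return err
        }
    }
    // Phase 2: swap each provider in place, reusing the preserved CRDs and inventory.
    installed := make([]Components, 0, len(items))
    for _, it := range items {
        c, err := deps.Components(ctx, it)
        if err != nil {
            return err
        }
        if err := deps.Delete(ctx, it); err != nil {
            return err
        }
        if err := deps.Install(ctx, c); err != nil {
            return err
        }
        installed = append(installed, c)
    }
    return deps.WaitReady(ctx, installed)
}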
- if err := installComponentsAndUpdateInventory(components, u.providerComponents, u.providerInventory); err != nil { + if err := installComponentsAndUpdateInventory(ctx, components, u.providerComponents, u.providerInventory); err != nil { return err } } // Delete webhook namespace since it's not needed from v1alpha4. if upgradePlan.Contract == clusterv1.GroupVersion.Version { - if err := u.providerComponents.DeleteWebhookNamespace(); err != nil { + if err := u.providerComponents.DeleteWebhookNamespace(ctx); err != nil { return err } } - return waitForProvidersReady(InstallOptions(opts), installQueue, u.proxy) + return waitForProvidersReady(ctx, InstallOptions(opts), installQueue, u.proxy) } -func (u *providerUpgrader) scaleDownProvider(provider clusterctlv1.Provider) error { +func (u *providerUpgrader) scaleDownProvider(ctx context.Context, provider clusterctlv1.Provider) error { log := logf.Log - log.Info("Scaling down", "Provider", provider.Name, "Version", provider.Version, "Namespace", provider.Namespace) + log.Info("Scaling down", "Provider", klog.KObj(&provider), "providerVersion", &provider.Version) - cs, err := u.proxy.NewClient() + cs, err := u.proxy.NewClient(ctx) if err != nil { return err } @@ -477,7 +474,8 @@ func (u *providerUpgrader) scaleDownProvider(provider clusterctlv1.Provider) err // Scale down provider Deployments. for _, deployment := range deploymentList.Items { - log.V(5).Info("Scaling down", "Deployment", deployment.Name, "Namespace", deployment.Namespace) + deployment := deployment + log.V(5).Info("Scaling down", "Deployment", klog.KObj(&deployment)) if err := scaleDownDeployment(ctx, cs, deployment); err != nil { return err } @@ -488,7 +486,7 @@ func (u *providerUpgrader) scaleDownProvider(provider clusterctlv1.Provider) err // scaleDownDeployment scales down a Deployment to 0 and waits until all replicas have been deleted. func scaleDownDeployment(ctx context.Context, c client.Client, deploy appsv1.Deployment) error { - if err := retryWithExponentialBackoff(newWriteBackoff(), func() error { + if err := retryWithExponentialBackoff(ctx, newWriteBackoff(), func(ctx context.Context) error { deployment := &appsv1.Deployment{} if err := c.Get(ctx, client.ObjectKeyFromObject(&deploy), deployment); err != nil { return errors.Wrapf(err, "failed to get Deployment/%s", deploy.GetName()) @@ -500,7 +498,7 @@ func scaleDownDeployment(ctx context.Context, c client.Client, deploy appsv1.Dep } // Scale down. - deployment.Spec.Replicas = pointer.Int32(0) + deployment.Spec.Replicas = ptr.To[int32](0) if err := c.Update(ctx, deployment); err != nil { return errors.Wrapf(err, "failed to update Deployment/%s", deploy.GetName()) } @@ -515,7 +513,7 @@ func scaleDownDeployment(ctx context.Context, c client.Client, deploy appsv1.Dep Steps: 60, Jitter: 0.4, } - if err := retryWithExponentialBackoff(deploymentScaleToZeroBackOff, func() error { + if err := retryWithExponentialBackoff(ctx, deploymentScaleToZeroBackOff, func(ctx context.Context) error { deployment := &appsv1.Deployment{} if err := c.Get(ctx, client.ObjectKeyFromObject(&deploy), deployment); err != nil { return errors.Wrapf(err, "failed to get Deployment/%s", deploy.GetName()) diff --git a/cmd/clusterctl/client/cluster/upgrader_info.go b/cmd/clusterctl/client/cluster/upgrader_info.go index 8033b79b7e0a..40dc6b3f9a2c 100644 --- a/cmd/clusterctl/client/cluster/upgrader_info.go +++ b/cmd/clusterctl/client/cluster/upgrader_info.go @@ -17,6 +17,7 @@ limitations under the License. 
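Before moving on to upgrader_info.go: the scaleDownDeployment hunk above is the classic get-mutate-update pattern followed by a poll until the replicas are gone. A stripped-down sketch of the same idea, assuming a controller-runtime client; the clusterctl retry/backoff helpers are replaced here with a plain wait.PollUntilContextTimeout, so this is not the exact implementation:

package scale

import (
    "context"
    "time"

    appsv1 "k8s.io/api/apps/v1"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/utils/ptr"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

// scaleToZero sets a Deployment to 0 replicas and waits until the controller
// has actually removed them, mirroring the shape of scaleDownDeployment above.
func scaleToZero(ctx context.Context, c client.Client, key client.ObjectKey) error {
    d := &appsv1.Deployment{}
    if err := c.Get(ctx, key, d); err != nil {
        return err
    }
    d.Spec.Replicas = ptr.To[int32](0) // the generic helper that replaces pointer.Int32
    if err := c.Update(ctx, d); err != nil {
        return err
    }
    // Poll with the context so cancellation aborts the wait.
    return wait.PollUntilContextTimeout(ctx, 5*time.Second, 5*time.Minute, true, func(ctx context.Context) (bool, error) {
        if err := c.Get(ctx, key, d); err != nil {
            return false, err
        }
        return d.Status.Replicas == 0, nil
    })
}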
package cluster import ( + "context" "fmt" "sort" @@ -46,19 +47,19 @@ type upgradeInfo struct { // getUpgradeInfo returns all the info required for taking upgrade decisions for a provider. // NOTE: This may also contain versions for the previous or next Cluster API contract (not supported in current clusterctl release, but upgrade plan should report these options). -func (u *providerUpgrader) getUpgradeInfo(provider clusterctlv1.Provider) (*upgradeInfo, error) { +func (u *providerUpgrader) getUpgradeInfo(ctx context.Context, provider clusterctlv1.Provider) (*upgradeInfo, error) { // Gets the list of versions available in the provider repository. configRepository, err := u.configClient.Providers().Get(provider.ProviderName, provider.GetProviderType()) if err != nil { return nil, err } - providerRepository, err := u.repositoryClientFactory(configRepository, u.configClient) + providerRepository, err := u.repositoryClientFactory(ctx, configRepository, u.configClient) if err != nil { return nil, err } - repositoryVersions, err := providerRepository.GetVersions() + repositoryVersions, err := providerRepository.GetVersions(ctx) if err != nil { return nil, err } @@ -80,7 +81,7 @@ func (u *providerUpgrader) getUpgradeInfo(provider clusterctlv1.Provider) (*upgr } } - latestMetadata, err := providerRepository.Metadata(versionTag(latestVersion)).Get() + latestMetadata, err := providerRepository.Metadata(versionTag(latestVersion)).Get(ctx) if err != nil { return nil, err } diff --git a/cmd/clusterctl/client/cluster/upgrader_info_test.go b/cmd/clusterctl/client/cluster/upgrader_info_test.go index 2fb6117b3fde..ca1bd0fe9576 100644 --- a/cmd/clusterctl/client/cluster/upgrader_info_test.go +++ b/cmd/clusterctl/client/cluster/upgrader_info_test.go @@ -17,6 +17,7 @@ limitations under the License. package cluster import ( + "context" "testing" . "github.com/onsi/gomega" @@ -228,19 +229,19 @@ func Test_providerUpgrader_getUpgradeInfo(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - configClient, _ := config.New("", config.InjectReader(tt.fields.reader)) + configClient, _ := config.New(context.Background(), "", config.InjectReader(tt.fields.reader)) u := &providerUpgrader{ configClient: configClient, - repositoryClientFactory: func(provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) { - return repository.New(provider, configClient, repository.InjectRepository(tt.fields.repo)) + repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, _ ...repository.Option) (repository.Client, error) { + return repository.New(ctx, provider, configClient, repository.InjectRepository(tt.fields.repo)) }, } - got, err := u.getUpgradeInfo(tt.args.provider) + got, err := u.getUpgradeInfo(context.Background(), tt.args.provider) if tt.wantErr { g.Expect(err).To(HaveOccurred()) } else { - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) } g.Expect(got).To(Equal(tt.want)) }) diff --git a/cmd/clusterctl/client/cluster/upgrader_test.go b/cmd/clusterctl/client/cluster/upgrader_test.go index fad159ec74f4..322ad4c74d4a 100644 --- a/cmd/clusterctl/client/cluster/upgrader_test.go +++ b/cmd/clusterctl/client/cluster/upgrader_test.go @@ -17,12 +17,12 @@ limitations under the License. package cluster import ( + "context" "testing" "github.com/google/go-cmp/cmp" .
"github.com/onsi/gomega" - clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/config" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/repository" @@ -137,81 +137,6 @@ func Test_providerUpgrader_Plan(t *testing.T) { }, wantErr: false, }, - { - name: "Upgrade for v1alpha3 (not supported), previous contract (not supported), current contract", // upgrade plan should report unsupported options - fields: fields{ - // config for two providers - reader: test.NewFakeReader(). - WithProvider("cluster-api", clusterctlv1.CoreProviderType, "https://somewhere.com"). - WithProvider("infra", clusterctlv1.InfrastructureProviderType, "https://somewhere.com"), - repository: map[string]repository.Repository{ - "cluster-api": repository.NewMemoryRepository(). - WithVersions("v1.0.0", "v1.0.1", "v2.0.0", "v3.0.0"). - WithMetadata("v3.0.0", &clusterctlv1.Metadata{ - ReleaseSeries: []clusterctlv1.ReleaseSeries{ - {Major: 1, Minor: 0, Contract: clusterv1alpha3.GroupVersion.Version}, - {Major: 2, Minor: 0, Contract: test.PreviousCAPIContractNotSupported}, - {Major: 3, Minor: 0, Contract: test.CurrentCAPIContract}, - }, - }), - "infrastructure-infra": repository.NewMemoryRepository(). - WithVersions("v1.0.0", "v2.0.0", "v2.0.1", "v3.0.0"). - WithMetadata("v3.0.0", &clusterctlv1.Metadata{ - ReleaseSeries: []clusterctlv1.ReleaseSeries{ - {Major: 1, Minor: 0, Contract: clusterv1alpha3.GroupVersion.Version}, - {Major: 2, Minor: 0, Contract: test.PreviousCAPIContractNotSupported}, - {Major: 3, Minor: 0, Contract: test.CurrentCAPIContract}, - }, - }), - }, - // two providers existing in the cluster - proxy: test.NewFakeProxy(). - WithProviderInventory("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system"). 
- WithProviderInventory("infra", clusterctlv1.InfrastructureProviderType, "v1.0.0", "infra-system"), - }, - want: []UpgradePlan{ - { // one upgrade plan with the latest releases in the v1alpha3 contract (not supported, but upgrade plan should report these options) - Contract: clusterv1alpha3.GroupVersion.Version, - Providers: []UpgradeItem{ - { - Provider: fakeProvider("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system"), - NextVersion: "v1.0.1", - }, - { - Provider: fakeProvider("infra", clusterctlv1.InfrastructureProviderType, "v1.0.0", "infra-system"), - NextVersion: "", - }, - }, - }, - { // one upgrade plan with the latest releases in the previous contract (not supported, but upgrade plan should report these options) - Contract: test.PreviousCAPIContractNotSupported, - Providers: []UpgradeItem{ - { - Provider: fakeProvider("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system"), - NextVersion: "v2.0.0", - }, - { - Provider: fakeProvider("infra", clusterctlv1.InfrastructureProviderType, "v1.0.0", "infra-system"), - NextVersion: "v2.0.1", - }, - }, - }, - { // one upgrade plan with the latest releases in the current contract - Contract: test.CurrentCAPIContract, - Providers: []UpgradeItem{ - { - Provider: fakeProvider("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system"), - NextVersion: "v3.0.0", - }, - { - Provider: fakeProvider("infra", clusterctlv1.InfrastructureProviderType, "v1.0.0", "infra-system"), - NextVersion: "v3.0.0", - }, - }, - }, - }, - wantErr: false, - }, { name: "Upgrade for previous contract (not supported), current contract", // upgrade plan should report unsupported options fields: fields{ @@ -384,23 +309,25 @@ func Test_providerUpgrader_Plan(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - configClient, _ := config.New("", config.InjectReader(tt.fields.reader)) + ctx := context.Background() + + configClient, _ := config.New(ctx, "", config.InjectReader(tt.fields.reader)) u := &providerUpgrader{ configClient: configClient, - repositoryClientFactory: func(provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) { - return repository.New(provider, configClient, repository.InjectRepository(tt.fields.repository[provider.ManifestLabel()])) + repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, _ ...repository.Option) (repository.Client, error) { + return repository.New(ctx, provider, configClient, repository.InjectRepository(tt.fields.repository[provider.ManifestLabel()])) }, providerInventory: newInventoryClient(tt.fields.proxy, nil), } - got, err := u.Plan() + got, err := u.Plan(ctx) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(got).To(Equal(tt.want), cmp.Diff(got, tt.want)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).To(BeComparableTo(tt.want), cmp.Diff(got, tt.want)) }) } } @@ -853,23 +780,25 @@ func Test_providerUpgrader_createCustomPlan(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - configClient, _ := config.New("", config.InjectReader(tt.fields.reader)) + ctx := context.Background() + + configClient, _ := config.New(ctx, "", config.InjectReader(tt.fields.reader)) u := &providerUpgrader{ configClient: configClient, - repositoryClientFactory: func(provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) { - return 
repository.New(provider, configClient, repository.InjectRepository(tt.fields.repository[provider.Name()])) + repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, _ ...repository.Option) (repository.Client, error) { + return repository.New(ctx, provider, configClient, repository.InjectRepository(tt.fields.repository[provider.Name()])) }, providerInventory: newInventoryClient(tt.fields.proxy, nil), } - got, err := u.createCustomPlan(tt.args.providersToUpgrade) + got, err := u.createCustomPlan(ctx, tt.args.providersToUpgrade) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(got).To(Equal(tt.want)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).To(BeComparableTo(tt.want)) }) } } @@ -970,23 +899,25 @@ func Test_providerUpgrader_ApplyPlan(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - configClient, _ := config.New("", config.InjectReader(tt.fields.reader)) + ctx := context.Background() + + configClient, _ := config.New(ctx, "", config.InjectReader(tt.fields.reader)) u := &providerUpgrader{ configClient: configClient, - repositoryClientFactory: func(provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) { - return repository.New(provider, configClient, repository.InjectRepository(tt.fields.repository[provider.ManifestLabel()])) + repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, _ ...repository.Option) (repository.Client, error) { + return repository.New(ctx, provider, configClient, repository.InjectRepository(tt.fields.repository[provider.ManifestLabel()])) }, providerInventory: newInventoryClient(tt.fields.proxy, nil), } - err := u.ApplyPlan(tt.opts, tt.contract) + err := u.ApplyPlan(ctx, tt.opts, tt.contract) if tt.wantErr { g.Expect(err).To(HaveOccurred()) g.Expect(err.Error()).Should(ContainSubstring(tt.errorMsg)) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) }) } } @@ -1109,23 +1040,25 @@ func Test_providerUpgrader_ApplyCustomPlan(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - configClient, _ := config.New("", config.InjectReader(tt.fields.reader)) + ctx := context.Background() + + configClient, _ := config.New(ctx, "", config.InjectReader(tt.fields.reader)) u := &providerUpgrader{ configClient: configClient, - repositoryClientFactory: func(provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) { - return repository.New(provider, configClient, repository.InjectRepository(tt.fields.repository[provider.ManifestLabel()])) + repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, _ ...repository.Option) (repository.Client, error) { + return repository.New(ctx, provider, configClient, repository.InjectRepository(tt.fields.repository[provider.ManifestLabel()])) }, providerInventory: newInventoryClient(tt.fields.proxy, nil), } - err := u.ApplyCustomPlan(tt.opts, tt.providersToUpgrade...) + err := u.ApplyCustomPlan(ctx, tt.opts, tt.providersToUpgrade...) 
if tt.wantErr { g.Expect(err).To(HaveOccurred()) g.Expect(err.Error()).Should(ContainSubstring(tt.errorMsg)) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) }) } } diff --git a/cmd/clusterctl/client/cluster/workload_cluster.go b/cmd/clusterctl/client/cluster/workload_cluster.go index a5e085dae2c9..7cb52ba24685 100644 --- a/cmd/clusterctl/client/cluster/workload_cluster.go +++ b/cmd/clusterctl/client/cluster/workload_cluster.go @@ -17,6 +17,8 @@ limitations under the License. package cluster import ( + "context" + "github.com/pkg/errors" "sigs.k8s.io/controller-runtime/pkg/client" @@ -26,7 +28,7 @@ import ( // WorkloadCluster has methods for fetching kubeconfig of workload cluster from management cluster. type WorkloadCluster interface { // GetKubeconfig returns the kubeconfig of the workload cluster. - GetKubeconfig(workloadClusterName string, namespace string) (string, error) + GetKubeconfig(ctx context.Context, workloadClusterName string, namespace string) (string, error) } // workloadCluster implements WorkloadCluster. @@ -41,8 +43,8 @@ func newWorkloadCluster(proxy Proxy) *workloadCluster { } } -func (p *workloadCluster) GetKubeconfig(workloadClusterName string, namespace string) (string, error) { - cs, err := p.proxy.NewClient() +func (p *workloadCluster) GetKubeconfig(ctx context.Context, workloadClusterName string, namespace string) (string, error) { + cs, err := p.proxy.NewClient(ctx) if err != nil { return "", err } diff --git a/cmd/clusterctl/client/cluster/workload_cluster_test.go b/cmd/clusterctl/client/cluster/workload_cluster_test.go index 50333d3efb08..4759a7b67796 100644 --- a/cmd/clusterctl/client/cluster/workload_cluster_test.go +++ b/cmd/clusterctl/client/cluster/workload_cluster_test.go @@ -17,6 +17,7 @@ limitations under the License. package cluster import ( + "context" "testing" . "github.com/onsi/gomega" @@ -84,8 +85,10 @@ users: t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + wc := newWorkloadCluster(tt.proxy) - data, err := wc.GetKubeconfig("test1", "test") + data, err := wc.GetKubeconfig(ctx, "test1", "test") if tt.expectErr { g.Expect(err).To(HaveOccurred()) diff --git a/cmd/clusterctl/client/clusterclass.go b/cmd/clusterctl/client/clusterclass.go index bb0872a29405..ea0b8d965225 100644 --- a/cmd/clusterctl/client/clusterclass.go +++ b/cmd/clusterctl/client/clusterclass.go @@ -34,7 +34,7 @@ import ( // addClusterClassIfMissing returns a Template that includes the base template and adds any cluster class definitions that // are referenced in the template. If the cluster class referenced already exists in the cluster it is not added to the // template.
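The comment above describes the core rule: only fetch and append a ClusterClass when the management cluster does not already have it. A hypothetical condensed version of that existence filter (the real code below additionally handles uninitialized clusters and template merging):

package classes

import (
    "context"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "sigs.k8s.io/controller-runtime/pkg/client"

    clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// missingClasses returns the referenced ClusterClass names that do not exist
// yet in the given namespace; only those need to be added to the template.
func missingClasses(ctx context.Context, c client.Client, namespace string, refs []string) ([]string, error) {
    missing := []string{}
    for _, name := range refs {
        cc := &clusterv1.ClusterClass{}
        err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, cc)
        switch {
        case apierrors.IsNotFound(err):
            missing = append(missing, name)
        case err != nil:
            return nil, err
        }
    }
    return missing, nil
}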
-func addClusterClassIfMissing(template Template, clusterClassClient repository.ClusterClassClient, clusterClient cluster.Client, targetNamespace string, listVariablesOnly bool) (Template, error) { +func addClusterClassIfMissing(ctx context.Context, template Template, clusterClassClient repository.ClusterClassClient, clusterClient cluster.Client, targetNamespace string, listVariablesOnly bool) (Template, error) { classes, err := clusterClassNamesFromTemplate(template) if err != nil { return nil, err @@ -44,7 +44,7 @@ func addClusterClassIfMissing(template Template, clusterClassClient repository.C return template, nil } - clusterClassesTemplate, err := fetchMissingClusterClassTemplates(clusterClassClient, clusterClient, classes, targetNamespace, listVariablesOnly) + clusterClassesTemplate, err := fetchMissingClusterClassTemplates(ctx, clusterClassClient, clusterClient, classes, targetNamespace, listVariablesOnly) if err != nil { return nil, err } @@ -87,7 +87,7 @@ func clusterClassNamesFromTemplate(template Template) ([]string, error) { // fetchMissingClusterClassTemplates returns a list of templates for ClusterClasses that do not yet exist // in the cluster. If the cluster is not initialized, all the ClusterClasses are added. -func fetchMissingClusterClassTemplates(clusterClassClient repository.ClusterClassClient, clusterClient cluster.Client, classes []string, targetNamespace string, listVariablesOnly bool) (Template, error) { +func fetchMissingClusterClassTemplates(ctx context.Context, clusterClassClient repository.ClusterClassClient, clusterClient cluster.Client, classes []string, targetNamespace string, listVariablesOnly bool) (Template, error) { // first check if the cluster is initialized. // If it is initialized: // For every ClusterClass check if it already exists in the cluster. @@ -99,26 +99,26 @@ func fetchMissingClusterClassTemplates(clusterClassClient repository.ClusterClas // Check if the cluster is initialized clusterInitialized := false var err error - if err := clusterClient.Proxy().CheckClusterAvailable(); err == nil { - clusterInitialized, err = clusterClient.ProviderInventory().CheckCAPIInstalled() + if err := clusterClient.Proxy().CheckClusterAvailable(ctx); err == nil { + clusterInitialized, err = clusterClient.ProviderInventory().CheckCAPIInstalled(ctx) if err != nil { return nil, errors.Wrap(err, "failed to check if the cluster is initialized") } } var c client.Client if clusterInitialized { - c, err = clusterClient.Proxy().NewClient() + c, err = clusterClient.Proxy().NewClient(ctx) if err != nil { return nil, err } } // Get the templates for all ClusterClasses and associated objects if the target - // ClusterClass does not exits in the cluster. + // ClusterClass does not exist in the cluster. templates := []repository.Template{} for _, class := range classes { if clusterInitialized { - exists, err := clusterClassExists(c, class, targetNamespace) + exists, err := clusterClassExists(ctx, c, class, targetNamespace) if err != nil { return nil, err } @@ -128,7 +128,7 @@ func fetchMissingClusterClassTemplates(clusterClassClient repository.ClusterClas } // The cluster is either not initialized or the ClusterClass does not yet exist in the cluster. // Fetch the cluster class to install. 
- clusterClassTemplate, err := clusterClassClient.Get(class, targetNamespace, listVariablesOnly) + clusterClassTemplate, err := clusterClassClient.Get(ctx, class, targetNamespace, listVariablesOnly) if err != nil { return nil, errors.Wrapf(err, "failed to get the cluster class template for %q", class) } @@ -139,7 +139,7 @@ func fetchMissingClusterClassTemplates(clusterClassClient repository.ClusterClas // that we do not add a ClusterClass (and associated objects) who definition is unknown. if clusterInitialized { for _, obj := range clusterClassTemplate.Objs() { - if exists, err := objExists(c, obj); err != nil { + if exists, err := objExists(ctx, c, obj); err != nil { return nil, err } else if exists { return nil, fmt.Errorf("%s(%s) already exists in the cluster", obj.GetName(), obj.GetObjectKind().GroupVersionKind()) @@ -157,9 +157,9 @@ func fetchMissingClusterClassTemplates(clusterClassClient repository.ClusterClas return merged, nil } -func clusterClassExists(c client.Client, class, targetNamespace string) (bool, error) { +func clusterClassExists(ctx context.Context, c client.Client, class, targetNamespace string) (bool, error) { clusterClass := &clusterv1.ClusterClass{} - if err := c.Get(context.TODO(), client.ObjectKey{Name: class, Namespace: targetNamespace}, clusterClass); err != nil { + if err := c.Get(ctx, client.ObjectKey{Name: class, Namespace: targetNamespace}, clusterClass); err != nil { if apierrors.IsNotFound(err) { return false, nil } @@ -168,9 +168,9 @@ func clusterClassExists(c client.Client, class, targetNamespace string) (bool, e return true, nil } -func objExists(c client.Client, obj unstructured.Unstructured) (bool, error) { +func objExists(ctx context.Context, c client.Client, obj unstructured.Unstructured) (bool, error) { o := obj.DeepCopy() - if err := c.Get(context.TODO(), client.ObjectKeyFromObject(o), o); err != nil { + if err := c.Get(ctx, client.ObjectKeyFromObject(o), o); err != nil { if apierrors.IsNotFound(err) { return false, nil } diff --git a/cmd/clusterctl/client/clusterclass_test.go b/cmd/clusterctl/client/clusterclass_test.go index 0ab79e7324b0..912df08a9bdd 100644 --- a/cmd/clusterctl/client/clusterclass_test.go +++ b/cmd/clusterctl/client/clusterclass_test.go @@ -17,6 +17,7 @@ limitations under the License. package client import ( + "context" "fmt" "testing" @@ -67,12 +68,14 @@ func TestClusterClassExists(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - config := newFakeConfig() + ctx := context.Background() + + config := newFakeConfig(ctx) client := newFakeCluster(cluster.Kubeconfig{Path: "kubeconfig", Context: "mgmt-context"}, config).WithObjs(tt.objs...) - c, _ := client.Proxy().NewClient() + c, _ := client.Proxy().NewClient(ctx) - actual, err := clusterClassExists(c, tt.clusterClass, metav1.NamespaceDefault) - g.Expect(err).NotTo(HaveOccurred()) + actual, err := clusterClassExists(ctx, c, tt.clusterClass, metav1.NamespaceDefault) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(actual).To(Equal(tt.want)) }) } @@ -152,8 +155,10 @@ func TestAddClusterClassIfMissing(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - config1 := newFakeConfig().WithProvider(infraProviderConfig) - repository1 := newFakeRepository(infraProviderConfig, config1). + ctx := context.Background() + + config1 := newFakeConfig(ctx).WithProvider(infraProviderConfig) + repository1 := newFakeRepository(ctx, infraProviderConfig, config1). WithPaths("root", ""). WithDefaultVersion("v1.0.0"). 
WithFile("v1.0.0", "clusterclass-dev.yaml", tt.clusterClassTemplateContent) @@ -205,7 +210,7 @@ func TestAddClusterClassIfMissing(t *testing.T) { } g := NewWithT(t) - template, err := addClusterClassIfMissing(baseTemplate, clusterClassClient, cluster, tt.targetNamespace, tt.listVariablesOnly) + template, err := addClusterClassIfMissing(ctx, baseTemplate, clusterClassClient, cluster, tt.targetNamespace, tt.listVariablesOnly) if tt.wantError { g.Expect(err).To(HaveOccurred()) } else { diff --git a/cmd/clusterctl/client/common.go b/cmd/clusterctl/client/common.go index be299a7307c7..dfd1d42f5bf0 100644 --- a/cmd/clusterctl/client/common.go +++ b/cmd/clusterctl/client/common.go @@ -17,6 +17,7 @@ limitations under the License. package client import ( + "context" "strings" "github.com/pkg/errors" @@ -28,7 +29,7 @@ import ( // getComponentsByName is a utility method that returns components // for a given provider with options including targetNamespace. -func (c *clusterctlClient) getComponentsByName(provider string, providerType clusterctlv1.ProviderType, options repository.ComponentsOptions) (repository.Components, error) { +func (c *clusterctlClient) getComponentsByName(ctx context.Context, provider string, providerType clusterctlv1.ProviderType, options repository.ComponentsOptions) (repository.Components, error) { // Parse the abbreviated syntax for name[:version] name, version, err := parseProviderName(provider) if err != nil { @@ -47,12 +48,12 @@ func (c *clusterctlClient) getComponentsByName(provider string, providerType clu // namespace etc. // Currently we are not supporting custom yaml processors for the provider // components. So we revert to using the default SimpleYamlProcessor. - repositoryClientFactory, err := c.repositoryClientFactory(RepositoryClientFactoryInput{Provider: providerConfig}) + repositoryClientFactory, err := c.repositoryClientFactory(ctx, RepositoryClientFactoryInput{Provider: providerConfig}) if err != nil { return nil, err } - components, err := repositoryClientFactory.Components().Get(options) + components, err := repositoryClientFactory.Components().Get(ctx, options) if err != nil { return nil, err } diff --git a/cmd/clusterctl/client/common_test.go b/cmd/clusterctl/client/common_test.go index 272d7b51f13e..ad3f0748e8cd 100644 --- a/cmd/clusterctl/client/common_test.go +++ b/cmd/clusterctl/client/common_test.go @@ -60,7 +60,7 @@ func Test_parseProviderName(t *testing.T) { if tt.wantErr { g.Expect(err).To(HaveOccurred()) } else { - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) } g.Expect(gotName).To(Equal(tt.wantName)) diff --git a/cmd/clusterctl/client/config.go b/cmd/clusterctl/client/config.go index 4140c2cf1495..cf915fa0d230 100644 --- a/cmd/clusterctl/client/config.go +++ b/cmd/clusterctl/client/config.go @@ -17,12 +17,13 @@ limitations under the License. 
package client import ( + "context" "io" "strconv" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/util/version" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster" @@ -45,8 +46,8 @@ func (c *clusterctlClient) GetProvidersConfig() ([]Provider, error) { return rr, nil } -func (c *clusterctlClient) GetProviderComponents(provider string, providerType clusterctlv1.ProviderType, options ComponentsOptions) (Components, error) { - components, err := c.getComponentsByName(provider, providerType, repository.ComponentsOptions(options)) +func (c *clusterctlClient) GetProviderComponents(ctx context.Context, provider string, providerType clusterctlv1.ProviderType, options ComponentsOptions) (Components, error) { + components, err := c.getComponentsByName(ctx, provider, providerType, repository.ComponentsOptions(options)) if err != nil { return nil, err } @@ -71,7 +72,7 @@ type ProcessYAMLOptions struct { SkipTemplateProcess bool } -func (c *clusterctlClient) ProcessYAML(options ProcessYAMLOptions) (YamlPrinter, error) { +func (c *clusterctlClient) ProcessYAML(ctx context.Context, options ProcessYAMLOptions) (YamlPrinter, error) { if options.ReaderSource != nil { // NOTE: Beware of potentially reading in large files all at once // since this is inefficient and increases memory utilization. @@ -103,7 +104,7 @@ func (c *clusterctlClient) ProcessYAML(options ProcessYAMLOptions) (YamlPrinter, } if options.URLSource != nil { - return c.getTemplateFromURL(clstr, *options.URLSource, "", options.SkipTemplateProcess) + return c.getTemplateFromURL(ctx, clstr, *options.URLSource, "", options.SkipTemplateProcess) } return nil, errors.New("unable to read custom template. Please specify a template source") @@ -134,7 +135,7 @@ type GetClusterTemplateOptions struct { ClusterName string // KubernetesVersion to use for the workload cluster. If unspecified, the value from os env variables - // or the .cluster-api/clusterctl.yaml config file will be used. + // or the $XDG_CONFIG_HOME/cluster-api/clusterctl.yaml or .cluster-api/clusterctl.yaml config file will be used. KubernetesVersion string // ControlPlaneMachineCount defines the number of control plane machines to be added to the workload cluster. @@ -203,7 +204,7 @@ type ConfigMapSourceOptions struct { DataKey string } -func (c *clusterctlClient) GetClusterTemplate(options GetClusterTemplateOptions) (Template, error) { +func (c *clusterctlClient) GetClusterTemplate(ctx context.Context, options GetClusterTemplateOptions) (Template, error) { // Checks that no more than one source is set numsSource := options.numSources() if numsSource > 1 { @@ -224,7 +225,7 @@ func (c *clusterctlClient) GetClusterTemplate(options GetClusterTemplateOptions) // If the option specifying the targetNamespace is empty, try to detect it. if options.TargetNamespace == "" { - if err := clusterClient.Proxy().CheckClusterAvailable(); err != nil { + if err := clusterClient.Proxy().CheckClusterAvailable(ctx); err != nil { return nil, errors.Wrap(err, "management cluster not available. Cannot auto-discover target namespace.
Please specify a target namespace") } currentNamespace, err := clusterClient.Proxy().CurrentNamespace() @@ -249,24 +250,24 @@ func (c *clusterctlClient) GetClusterTemplate(options GetClusterTemplateOptions) // users to dry-run the command and take a look at what the cluster will look like; in both scenarios, it is required // to pass provider:version given that auto-discovery can't work without a provider inventory installed in a cluster. if options.Kubeconfig.Path != "" { - if err := clusterClient.ProviderInventory().CheckCAPIContract(cluster.AllowCAPINotInstalled{}); err != nil { + if err := clusterClient.ProviderInventory().CheckCAPIContract(ctx, cluster.AllowCAPINotInstalled{}); err != nil { return nil, err } } - return c.getTemplateFromRepository(clusterClient, options) + return c.getTemplateFromRepository(ctx, clusterClient, options) } if options.ConfigMapSource != nil { - return c.getTemplateFromConfigMap(clusterClient, *options.ConfigMapSource, options.TargetNamespace, options.ListVariablesOnly) + return c.getTemplateFromConfigMap(ctx, clusterClient, *options.ConfigMapSource, options.TargetNamespace, options.ListVariablesOnly) } if options.URLSource != nil { - return c.getTemplateFromURL(clusterClient, *options.URLSource, options.TargetNamespace, options.ListVariablesOnly) + return c.getTemplateFromURL(ctx, clusterClient, *options.URLSource, options.TargetNamespace, options.ListVariablesOnly) } return nil, errors.New("unable to read custom template. Please specify a template source") } // getTemplateFromRepository returns a workload cluster template from a provider repository. -func (c *clusterctlClient) getTemplateFromRepository(cluster cluster.Client, options GetClusterTemplateOptions) (Template, error) { +func (c *clusterctlClient) getTemplateFromRepository(ctx context.Context, cluster cluster.Client, options GetClusterTemplateOptions) (Template, error) { source := *options.ProviderRepositorySource targetNamespace := options.TargetNamespace listVariablesOnly := options.ListVariablesOnly @@ -276,16 +277,16 @@ func (c *clusterctlClient) getTemplateFromRepository(cluster cluster.Client, opt provider := source.InfrastructureProvider ensureCustomResourceDefinitions := false if provider == "" { - if err := cluster.Proxy().CheckClusterAvailable(); err != nil { + if err := cluster.Proxy().CheckClusterAvailable(ctx); err != nil { return nil, errors.Wrap(err, "management cluster not available. Cannot auto-discover default infrastructure provider. Please specify an infrastructure provider") } // ensure the custom resource definitions required by clusterctl are in place - if err := cluster.ProviderInventory().EnsureCustomResourceDefinitions(); err != nil { + if err := cluster.ProviderInventory().EnsureCustomResourceDefinitions(ctx); err != nil { return nil, errors.Wrapf(err, "provider custom resource definitions (CRDs) are not installed") } ensureCustomResourceDefinitions = true - defaultProviderName, err := cluster.ProviderInventory().GetDefaultProviderName(clusterctlv1.InfrastructureProviderType) + defaultProviderName, err := cluster.ProviderInventory().GetDefaultProviderName(ctx, clusterctlv1.InfrastructureProviderType) if err != nil { return nil, err } @@ -304,17 +305,17 @@ func (c *clusterctlClient) getTemplateFromRepository(cluster cluster.Client, opt // If the version of the infrastructure provider to get templates from is empty, try to detect it. 
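That detection comment is worth unpacking: the version falls back from the explicitly requested value to whatever the provider inventory in the management cluster reports, and fails with guidance when neither source can answer. A hypothetical reduction of the chain (the inventory interface here is a stand-in, not the clusterctl API):

package templates

import (
    "context"
    "fmt"
)

// inventory is a stand-in for the cluster's provider inventory (hypothetical).
type inventory interface {
    GetProviderVersion(ctx context.Context, name string) (string, error)
}

// resolveVersion prefers the explicitly requested version and otherwise asks
// the inventory, failing loudly when neither source can answer.
func resolveVersion(ctx context.Context, requested, name string, inv inventory) (string, error) {
    if requested != "" {
        return requested, nil
    }
    v, err := inv.GetProviderVersion(ctx, name)
    if err != nil {
        return "", err
    }
    if v == "" {
        return "", fmt.Errorf("cannot auto-discover a version for provider %q, please specify one", name)
    }
    return v, nil
}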
if version == "" { - if err := cluster.Proxy().CheckClusterAvailable(); err != nil { + if err := cluster.Proxy().CheckClusterAvailable(ctx); err != nil { return nil, errors.Wrapf(err, "management cluster not available. Cannot auto-discover version for the provider %q automatically. Please specify a version", name) } // ensure the custom resource definitions required by clusterctl are in place (if not already done) if !ensureCustomResourceDefinitions { - if err := cluster.ProviderInventory().EnsureCustomResourceDefinitions(); err != nil { + if err := cluster.ProviderInventory().EnsureCustomResourceDefinitions(ctx); err != nil { return nil, errors.Wrapf(err, "failed to identify the default version for the provider %q. Please specify a version", name) } } - inventoryVersion, err := cluster.ProviderInventory().GetProviderVersion(name, clusterctlv1.InfrastructureProviderType) + inventoryVersion, err := cluster.ProviderInventory().GetProviderVersion(ctx, name, clusterctlv1.InfrastructureProviderType) if err != nil { return nil, err } @@ -331,19 +332,19 @@ func (c *clusterctlClient) getTemplateFromRepository(cluster cluster.Client, opt return nil, err } - repo, err := c.repositoryClientFactory(RepositoryClientFactoryInput{Provider: providerConfig, Processor: processor}) + repo, err := c.repositoryClientFactory(ctx, RepositoryClientFactoryInput{Provider: providerConfig, Processor: processor}) if err != nil { return nil, err } - template, err := repo.Templates(version).Get(source.Flavor, targetNamespace, listVariablesOnly) + template, err := repo.Templates(version).Get(ctx, source.Flavor, targetNamespace, listVariablesOnly) if err != nil { return nil, err } clusterClassClient := repo.ClusterClasses(version) - template, err = addClusterClassIfMissing(template, clusterClassClient, cluster, targetNamespace, listVariablesOnly) + template, err = addClusterClassIfMissing(ctx, template, clusterClassClient, cluster, targetNamespace, listVariablesOnly) if err != nil { return nil, err } @@ -352,7 +353,7 @@ func (c *clusterctlClient) getTemplateFromRepository(cluster cluster.Client, opt } // getTemplateFromConfigMap returns a workload cluster template from a ConfigMap. -func (c *clusterctlClient) getTemplateFromConfigMap(cluster cluster.Client, source ConfigMapSourceOptions, targetNamespace string, listVariablesOnly bool) (Template, error) { +func (c *clusterctlClient) getTemplateFromConfigMap(ctx context.Context, cluster cluster.Client, source ConfigMapSourceOptions, targetNamespace string, listVariablesOnly bool) (Template, error) { // If the option specifying the configMapNamespace is empty, default it to the current namespace. if source.Namespace == "" { currentNamespace, err := cluster.Proxy().CurrentNamespace() @@ -367,12 +368,12 @@ func (c *clusterctlClient) getTemplateFromConfigMap(cluster cluster.Client, sour source.DataKey = DefaultCustomTemplateConfigMapKey } - return cluster.Template().GetFromConfigMap(source.Namespace, source.Name, source.DataKey, targetNamespace, listVariablesOnly) + return cluster.Template().GetFromConfigMap(ctx, source.Namespace, source.Name, source.DataKey, targetNamespace, listVariablesOnly) } // getTemplateFromURL returns a workload cluster template from an URL. 
-func (c *clusterctlClient) getTemplateFromURL(cluster cluster.Client, source URLSourceOptions, targetNamespace string, listVariablesOnly bool) (Template, error) { - return cluster.Template().GetFromURL(source.URL, targetNamespace, listVariablesOnly) +func (c *clusterctlClient) getTemplateFromURL(ctx context.Context, cluster cluster.Client, source URLSourceOptions, targetNamespace string, listVariablesOnly bool) (Template, error) { + return cluster.Template().GetFromURL(ctx, source.URL, targetNamespace, listVariablesOnly) } // templateOptionsToVariables injects some of the templateOptions to the configClient so they can be consumed as variables from the template. @@ -403,7 +404,7 @@ func (c *clusterctlClient) templateOptionsToVariables(options GetClusterTemplate if options.ControlPlaneMachineCount == nil { // Check if set through env variable and default to 1 otherwise if v, err := c.configClient.Variables().Get("CONTROL_PLANE_MACHINE_COUNT"); err != nil { - options.ControlPlaneMachineCount = pointer.Int64(1) + options.ControlPlaneMachineCount = ptr.To[int64](1) } else { i, err := strconv.ParseInt(v, 10, 64) if err != nil { @@ -421,7 +422,7 @@ func (c *clusterctlClient) templateOptionsToVariables(options GetClusterTemplate if options.WorkerMachineCount == nil { // Check if set through env variable and default to 0 otherwise if v, err := c.configClient.Variables().Get("WORKER_MACHINE_COUNT"); err != nil { - options.WorkerMachineCount = pointer.Int64(0) + options.WorkerMachineCount = ptr.To[int64](0) } else { i, err := strconv.ParseInt(v, 10, 64) if err != nil { diff --git a/cmd/clusterctl/client/config/cert_manager_client.go b/cmd/clusterctl/client/config/cert_manager_client.go index 990b93824170..12973b4d9057 100644 --- a/cmd/clusterctl/client/config/cert_manager_client.go +++ b/cmd/clusterctl/client/config/cert_manager_client.go @@ -29,7 +29,7 @@ const ( CertManagerConfigKey = "cert-manager" // CertManagerDefaultVersion defines the default cert-manager version to be used by clusterctl. - CertManagerDefaultVersion = "v1.11.0" + CertManagerDefaultVersion = "v1.14.5" // CertManagerDefaultURL defines the default cert-manager repository url to be used by clusterctl. // NOTE: At runtime CertManagerDefaultVersion may be replaced with the diff --git a/cmd/clusterctl/client/config/cert_manager_client_test.go b/cmd/clusterctl/client/config/cert_manager_client_test.go index 698c955cf3fc..d38da6d63be8 100644 --- a/cmd/clusterctl/client/config/cert_manager_client_test.go +++ b/cmd/clusterctl/client/config/cert_manager_client_test.go @@ -93,7 +93,7 @@ func TestCertManagerGet(t *testing.T) { return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(Equal(tt.want)) }) } diff --git a/cmd/clusterctl/client/config/client.go b/cmd/clusterctl/client/config/client.go index a25faf9944b1..50f10034cfee 100644 --- a/cmd/clusterctl/client/config/client.go +++ b/cmd/clusterctl/client/config/client.go @@ -17,6 +17,8 @@ limitations under the License. package config import ( + "context" + "github.com/pkg/errors" ) @@ -75,20 +77,23 @@ func InjectReader(reader Reader) Option { } // New returns a Client for interacting with the clusterctl configuration. -func New(path string, options ...Option) (Client, error) { - return newConfigClient(path, options...) +func New(ctx context.Context, path string, options ...Option) (Client, error) { + return newConfigClient(ctx, path, options...)
} -func newConfigClient(path string, options ...Option) (*configClient, error) { +func newConfigClient(ctx context.Context, path string, options ...Option) (*configClient, error) { client := &configClient{} for _, o := range options { o(client) } // if there is an injected reader, use it, otherwise use a default one + var err error if client.reader == nil { - client.reader = newViperReader() - if err := client.reader.Init(path); err != nil { + if client.reader, err = newViperReader(); err != nil { + return nil, errors.Wrap(err, "failed to create the configuration reader") + } + if err = client.reader.Init(ctx, path); err != nil { return nil, errors.Wrap(err, "failed to initialize the configuration reader") } } @@ -99,7 +104,7 @@ func newConfigClient(path string, options ...Option) (*configClient, error) { // Reader defines the behaviours of a configuration reader. type Reader interface { // Init initializes the configuration reader. - Init(path string) error + Init(ctx context.Context, path string) error // Get returns a configuration value of type string. // In case the configuration value does not exist, it returns an error. diff --git a/cmd/clusterctl/client/config/imagemeta_client_test.go b/cmd/clusterctl/client/config/imagemeta_client_test.go index be5c7bbf71c0..3b383a4b0fcd 100644 --- a/cmd/clusterctl/client/config/imagemeta_client_test.go +++ b/cmd/clusterctl/client/config/imagemeta_client_test.go @@ -252,7 +252,7 @@ func Test_imageMetaClient_AlterImage(t *testing.T) { return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(Equal(tt.want)) }) } diff --git a/cmd/clusterctl/client/config/providers_client.go b/cmd/clusterctl/client/config/providers_client.go index fa75ad540f32..09604454e951 100644 --- a/cmd/clusterctl/client/config/providers_client.go +++ b/cmd/clusterctl/client/config/providers_client.go @@ -45,14 +45,18 @@ const ( DOProviderName = "digitalocean" GCPProviderName = "gcp" HetznerProviderName = "hetzner" + HivelocityProviderName = "hivelocity-hivelocity" OutscaleProviderName = "outscale" IBMCloudProviderName = "ibmcloud" + InMemoryProviderName = "in-memory" + LinodeProviderName = "linode-linode" Metal3ProviderName = "metal3" NestedProviderName = "nested" NutanixProviderName = "nutanix" OCIProviderName = "oci" OpenStackProviderName = "openstack" PacketProviderName = "packet" + TinkerbellProviderName = "tinkerbell-tinkerbell" SideroProviderName = "sidero" VCloudDirectorProviderName = "vcd" VSphereProviderName = "vsphere" @@ -62,23 +66,42 @@ const ( VclusterProviderName = "vcluster" VirtinkProviderName = "virtink" CoxEdgeProviderName = "coxedge" + ProxmoxProviderName = "proxmox" + K0smotronProviderName = "k0sproject-k0smotron" ) // Bootstrap providers. const ( - KubeadmBootstrapProviderName = "kubeadm" - TalosBootstrapProviderName = "talos" - MicroK8sBootstrapProviderName = "microk8s" - KubeKeyK3sBootstrapProviderName = "kubekey-k3s" + KubeadmBootstrapProviderName = "kubeadm" + TalosBootstrapProviderName = "talos" + MicroK8sBootstrapProviderName = "microk8s" + OracleCloudNativeBootstrapProviderName = "ocne" + KubeKeyK3sBootstrapProviderName = "kubekey-k3s" + RKE2BootstrapProviderName = "rke2" + K0smotronBootstrapProviderName = "k0sproject-k0smotron" ) // ControlPlane providers.
const ( - KubeadmControlPlaneProviderName = "kubeadm" - TalosControlPlaneProviderName = "talos" - MicroK8sControlPlaneProviderName = "microk8s" - NestedControlPlaneProviderName = "nested" - KubeKeyK3sControlPlaneProviderName = "kubekey-k3s" + KubeadmControlPlaneProviderName = "kubeadm" + TalosControlPlaneProviderName = "talos" + MicroK8sControlPlaneProviderName = "microk8s" + NestedControlPlaneProviderName = "nested" + OracleCloudNativeControlPlaneProviderName = "ocne" + KubeKeyK3sControlPlaneProviderName = "kubekey-k3s" + KamajiControlPlaneProviderName = "kamaji" + RKE2ControlPlaneProviderName = "rke2" + K0smotronControlPlaneProviderName = "k0sproject-k0smotron" +) + +// IPAM providers. +const ( + InClusterIPAMProviderName = "in-cluster" +) + +// Add-on providers. +const ( + HelmAddonProviderName = "helm" ) // Other. @@ -128,6 +151,11 @@ func (p *providersClient) defaults() []Provider { }, // Infrastructure providers + &provider{ + name: LinodeProviderName, + url: "https://github.com/linode/cluster-api-provider-linode/releases/latest/infrastructure-components.yaml", + providerType: clusterctlv1.InfrastructureProviderType, + }, &provider{ name: AWSProviderName, url: "https://github.com/kubernetes-sigs/cluster-api-provider-aws/releases/latest/infrastructure-components.yaml", @@ -164,6 +192,11 @@ func (p *providersClient) defaults() []Provider { url: "https://github.com/kubernetes-sigs/cluster-api-provider-packet/releases/latest/infrastructure-components.yaml", providerType: clusterctlv1.InfrastructureProviderType, }, + &provider{ + name: TinkerbellProviderName, + url: "https://github.com/tinkerbell/cluster-api-provider-tinkerbell/releases/latest/infrastructure-components.yaml", + providerType: clusterctlv1.InfrastructureProviderType, + }, &provider{ name: Metal3ProviderName, url: "https://github.com/metal3-io/cluster-api-provider-metal3/releases/latest/infrastructure-components.yaml", @@ -219,6 +252,11 @@ func (p *providersClient) defaults() []Provider { url: "https://github.com/syself/cluster-api-provider-hetzner/releases/latest/infrastructure-components.yaml", providerType: clusterctlv1.InfrastructureProviderType, }, + &provider{ + name: HivelocityProviderName, + url: "https://github.com/hivelocity/cluster-api-provider-hivelocity/releases/latest/infrastructure-components.yaml", + providerType: clusterctlv1.InfrastructureProviderType, + }, &provider{ name: OutscaleProviderName, url: "https://github.com/outscale/cluster-api-provider-outscale/releases/latest/infrastructure-components.yaml", @@ -229,6 +267,11 @@ func (p *providersClient) defaults() []Provider { url: "https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/releases/latest/infrastructure-components.yaml", providerType: clusterctlv1.InfrastructureProviderType, }, + &provider{ + name: InMemoryProviderName, + url: "https://github.com/kubernetes-sigs/cluster-api/releases/latest/infrastructure-components-in-memory-development.yaml", + providerType: clusterctlv1.InfrastructureProviderType, + }, &provider{ name: NutanixProviderName, url: "https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/releases/latest/infrastructure-components.yaml", @@ -254,6 +297,16 @@ func (p *providersClient) defaults() []Provider { url: "https://github.com/smartxworks/cluster-api-provider-virtink/releases/latest/infrastructure-components.yaml", providerType: clusterctlv1.InfrastructureProviderType, }, + &provider{ + name: ProxmoxProviderName, + url: 
"https://github.com/ionos-cloud/cluster-api-provider-proxmox/releases/latest/infrastructure-components.yaml", + providerType: clusterctlv1.InfrastructureProviderType, + }, + &provider{ + name: K0smotronProviderName, + url: "https://github.com/k0sproject/k0smotron/releases/latest/infrastructure-components.yaml", + providerType: clusterctlv1.InfrastructureProviderType, + }, // Bootstrap providers &provider{ @@ -276,6 +329,22 @@ func (p *providersClient) defaults() []Provider { url: "https://github.com/canonical/cluster-api-bootstrap-provider-microk8s/releases/latest/bootstrap-components.yaml", providerType: clusterctlv1.BootstrapProviderType, }, + &provider{ + name: OracleCloudNativeBootstrapProviderName, + url: "https://github.com/verrazzano/cluster-api-provider-ocne/releases/latest/bootstrap-components.yaml", + providerType: clusterctlv1.BootstrapProviderType, + }, + &provider{ + name: RKE2BootstrapProviderName, + url: "https://github.com/rancher-sandbox/cluster-api-provider-rke2/releases/latest/bootstrap-components.yaml", + providerType: clusterctlv1.BootstrapProviderType, + }, + &provider{ + name: K0smotronBootstrapProviderName, + url: "https://github.com/k0sproject/k0smotron/releases/latest/bootstrap-components.yaml", + providerType: clusterctlv1.BootstrapProviderType, + }, + // ControlPlane providers &provider{ name: KubeadmControlPlaneProviderName, @@ -302,6 +371,40 @@ func (p *providersClient) defaults() []Provider { url: "https://github.com/kubernetes-sigs/cluster-api-provider-nested/releases/latest/control-plane-components.yaml", providerType: clusterctlv1.ControlPlaneProviderType, }, + &provider{ + name: OracleCloudNativeControlPlaneProviderName, + url: "https://github.com/verrazzano/cluster-api-provider-ocne/releases/latest/control-plane-components.yaml", + providerType: clusterctlv1.ControlPlaneProviderType, + }, + &provider{ + name: KamajiControlPlaneProviderName, + url: "https://github.com/clastix/cluster-api-control-plane-provider-kamaji/releases/latest/control-plane-components.yaml", + providerType: clusterctlv1.ControlPlaneProviderType, + }, + &provider{ + name: RKE2ControlPlaneProviderName, + url: "https://github.com/rancher-sandbox/cluster-api-provider-rke2/releases/latest/control-plane-components.yaml", + providerType: clusterctlv1.ControlPlaneProviderType, + }, + &provider{ + name: K0smotronControlPlaneProviderName, + url: "https://github.com/k0sproject/k0smotron/releases/latest/control-plane-components.yaml", + providerType: clusterctlv1.ControlPlaneProviderType, + }, + + // IPAM providers + &provider{ + name: InClusterIPAMProviderName, + url: "https://github.com/kubernetes-sigs/cluster-api-ipam-provider-in-cluster/releases/latest/ipam-components.yaml", + providerType: clusterctlv1.IPAMProviderType, + }, + + // Add-on providers + &provider{ + name: HelmAddonProviderName, + url: "https://github.com/kubernetes-sigs/cluster-api-addon-provider-helm/releases/latest/addon-components.yaml", + providerType: clusterctlv1.AddonProviderType, + }, } return defaults @@ -401,16 +504,18 @@ func validateProvider(r Provider) error { clusterctlv1.InfrastructureProviderType, clusterctlv1.ControlPlaneProviderType, clusterctlv1.IPAMProviderType, - clusterctlv1.RuntimeExtensionProviderType: + clusterctlv1.RuntimeExtensionProviderType, + clusterctlv1.AddonProviderType: break default: - return errors.Errorf("invalid provider type. Allowed values are [%s, %s, %s, %s, %s, %s]", + return errors.Errorf("invalid provider type. 
Allowed values are [%s, %s, %s, %s, %s, %s, %s]", clusterctlv1.CoreProviderType, clusterctlv1.BootstrapProviderType, clusterctlv1.InfrastructureProviderType, clusterctlv1.ControlPlaneProviderType, clusterctlv1.IPAMProviderType, - clusterctlv1.RuntimeExtensionProviderType) + clusterctlv1.RuntimeExtensionProviderType, + clusterctlv1.AddonProviderType) } return nil } diff --git a/cmd/clusterctl/client/config/providers_client_test.go b/cmd/clusterctl/client/config/providers_client_test.go index a54f3de3f061..e70cc6956439 100644 --- a/cmd/clusterctl/client/config/providers_client_test.go +++ b/cmd/clusterctl/client/config/providers_client_test.go @@ -23,6 +23,7 @@ import ( "testing" . "github.com/onsi/gomega" + "github.com/onsi/gomega/format" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" "sigs.k8s.io/cluster-api/cmd/clusterctl/internal/test" @@ -41,6 +42,10 @@ func Test_providers_List(t *testing.T) { }) defaultsAndZZZ := append(defaults, NewProvider("zzz", "https://zzz/infrastructure-components.yaml", "InfrastructureProvider")) + // AddonProviders are at the end of the list so we want to make sure this InfrastructureProvider is before the AddonProviders. + sort.Slice(defaultsAndZZZ, func(i, j int) bool { + return defaultsAndZZZ[i].Less(defaultsAndZZZ[j]) + }) defaultsWithOverride := append([]Provider{}, defaults...) defaultsWithOverride[0] = NewProvider(defaults[0].Name(), "https://zzz/infrastructure-components.yaml", defaults[0].Type()) @@ -135,6 +140,9 @@ func Test_providers_List(t *testing.T) { wantErr: true, }, } + + format.MaxLength = 15000 // This way it doesn't truncate the output on test failure + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) @@ -156,7 +164,7 @@ func Test_providers_List(t *testing.T) { return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(Equal(tt.want)) }) } @@ -250,7 +258,7 @@ func Test_validateProvider(t *testing.T) { if tt.wantErr { g.Expect(err).To(HaveOccurred()) } else { - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) } }) } @@ -271,7 +279,7 @@ func Test_providers_Defaults(t *testing.T) { for _, d := range defaults { err := validateProvider(d) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) } } @@ -353,7 +361,7 @@ func Test_providers_Get(t *testing.T) { return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(Equal(tt.want)) }) } diff --git a/cmd/clusterctl/client/config/reader_memory.go b/cmd/clusterctl/client/config/reader_memory.go index 05ddeb2980b4..1ad1c346cfeb 100644 --- a/cmd/clusterctl/client/config/reader_memory.go +++ b/cmd/clusterctl/client/config/reader_memory.go @@ -17,6 +17,8 @@ limitations under the License. package config import ( + "context" + "github.com/pkg/errors" "sigs.k8s.io/yaml" @@ -42,7 +44,7 @@ func NewMemoryReader() *MemoryReader { } // Init initializes the reader. -func (f *MemoryReader) Init(_ string) error { +func (f *MemoryReader) Init(_ context.Context, _ string) error { data, err := yaml.Marshal(f.providers) if err != nil { return err
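With clusterctlv1.AddonProviderType now accepted by validateProvider, a custom add-on provider can be registered through the in-memory reader. A hedged sketch; the provider name and URL below are hypothetical:

```go
package main

import (
	"context"

	clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
	"sigs.k8s.io/cluster-api/cmd/clusterctl/client/config"
)

func main() {
	ctx := context.Background()

	reader := config.NewMemoryReader()
	if err := reader.Init(ctx, ""); err != nil {
		panic(err)
	}

	// AddProvider appends to the in-memory "providers" key; entries of
	// type AddonProviderType now pass validateProvider. The name and URL
	// are made up for illustration.
	if _, err := reader.AddProvider("my-helm-fork", clusterctlv1.AddonProviderType,
		"https://example.com/releases/latest/addon-components.yaml"); err != nil {
		panic(err)
	}
}
```

diff --git a/cmd/clusterctl/client/config/reader_memory_test.go b/cmd/clusterctl/client/config/reader_memory_test.go index 0ea0232a5d97..6cd907c0f9cc 100644 --- a/cmd/clusterctl/client/config/reader_memory_test.go +++ b/cmd/clusterctl/client/config/reader_memory_test.go @@ -17,6 +17,7 @@ limitations under the License. package config import ( + "context" "testing" .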
"github.com/onsi/gomega" @@ -52,8 +53,11 @@ func TestMemoryReader(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + + ctx := context.Background() + f := NewMemoryReader() - g.Expect(f.Init("")).To(Succeed()) + g.Expect(f.Init(ctx, "")).To(Succeed()) for _, p := range tt.providers { _, err := f.AddProvider(p.Name, p.Type, p.URL) g.Expect(err).ToNot(HaveOccurred()) @@ -64,11 +68,11 @@ func TestMemoryReader(t *testing.T) { providersOut := []configProvider{} g.Expect(f.UnmarshalKey("providers", &providersOut)).To(Succeed()) - g.Expect(providersOut).To(Equal(tt.providers)) + g.Expect(providersOut).To(BeComparableTo(tt.providers)) imagesOut := map[string]imageMeta{} g.Expect(f.UnmarshalKey("images", &imagesOut)).To(Succeed()) - g.Expect(imagesOut).To(Equal(tt.imageMetas)) + g.Expect(imagesOut).To(BeComparableTo(tt.imageMetas)) for n, v := range tt.variables { outV, err := f.Get(n) diff --git a/cmd/clusterctl/client/config/reader_viper.go b/cmd/clusterctl/client/config/reader_viper.go index 113423100efe..02bb984c5876 100644 --- a/cmd/clusterctl/client/config/reader_viper.go +++ b/cmd/clusterctl/client/config/reader_viper.go @@ -27,17 +27,19 @@ import ( "strings" "time" + "github.com/adrg/xdg" "github.com/pkg/errors" "github.com/spf13/viper" - "k8s.io/client-go/util/homedir" logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log" ) const ( - // ConfigFolder defines the name of the config folder under $home. + // ConfigFolder defines the old name of the config folder under $HOME. ConfigFolder = ".cluster-api" - // ConfigName defines the name of the config file under ConfigFolder. + // ConfigFolderXDG defines the name of the config folder under $XDG_CONFIG_HOME. + ConfigFolderXDG = "cluster-api" + // ConfigName defines the name of the config file under ConfigFolderXDG. ConfigName = "clusterctl" // DownloadConfigFile is the config file when fetching the config from a remote location. DownloadConfigFile = "clusterctl-download.yaml" @@ -58,18 +60,22 @@ func injectConfigPaths(configPaths []string) viperReaderOption { } // newViperReader returns a viperReader. -func newViperReader(opts ...viperReaderOption) Reader { +func newViperReader(opts ...viperReaderOption) (Reader, error) { + configDirectory, err := xdg.ConfigFile(ConfigFolderXDG) + if err != nil { + return nil, err + } vr := &viperReader{ - configPaths: []string{filepath.Join(homedir.HomeDir(), ConfigFolder)}, + configPaths: []string{configDirectory, filepath.Join(xdg.Home, ConfigFolder)}, } for _, o := range opts { o(vr) } - return vr + return vr, nil } // Init initialize the viperReader. 
-func (v *viperReader) Init(path string) error { +func (v *viperReader) Init(ctx context.Context, path string) error { log := logf.Log // Configure viper for reading environment variables as well, and more specifically: @@ -89,16 +95,18 @@ func (v *viperReader) Init(path string) error { switch { case url.Scheme == "https" || url.Scheme == "http": - configPath := filepath.Join(homedir.HomeDir(), ConfigFolder) + var configDirectory string if len(v.configPaths) > 0 { - configPath = v.configPaths[0] - } - if err := os.MkdirAll(configPath, os.ModePerm); err != nil { - return err + configDirectory = v.configPaths[0] + } else { + configDirectory, err = xdg.ConfigFile(ConfigFolderXDG) + if err != nil { + return err + } } - downloadConfigFile := filepath.Join(configPath, DownloadConfigFile) - err = downloadFile(url.String(), downloadConfigFile) + downloadConfigFile := filepath.Join(configDirectory, DownloadConfigFile) + err = downloadFile(ctx, url.String(), downloadConfigFile) if err != nil { return err } @@ -112,14 +120,14 @@ func (v *viperReader) Init(path string) error { viper.SetConfigFile(path) } } else { - // Checks if there is a default .cluster-api/clusterctl{.extension} file in home directory + // Checks if there is a default $XDG_CONFIG_HOME/cluster-api/clusterctl{.extension} or $HOME/.cluster-api/clusterctl{.extension} file if !v.checkDefaultConfig() { // since there is no default config to read from, just skip // reading in config log.V(5).Info("No default config file available") return nil } - // Configure viper for reading .cluster-api/clusterctl{.extension} in home directory + // Configure viper for reading $XDG_CONFIG_HOME/cluster-api/clusterctl{.extension} or $HOME/.cluster-api/clusterctl{.extension} file viper.SetConfigName(ConfigName) for _, p := range v.configPaths { viper.AddConfigPath(p) } } @@ -129,13 +137,11 @@ func (v *viperReader) Init(path string) error { if err := viper.ReadInConfig(); err != nil { return err } - log.V(5).Info("Using configuration", "File", viper.ConfigFileUsed()) + log.V(5).Info("Using configuration", "file", viper.ConfigFileUsed()) return nil } -func downloadFile(url string, filepath string) error { - ctx := context.TODO() - +func downloadFile(ctx context.Context, url string, filepath string) error { // Create the file out, err := os.Create(filepath) //nolint:gosec // No security issue: filepath is safe. if err != nil {
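downloadFile now receives the caller's context instead of minting context.TODO() at the leaf, so cancelling the caller aborts the download. The helper's body is elided in this hunk; a representative sketch of a context-aware fetch of the same shape (not the actual clusterctl implementation):

```go
package config

import (
	"context"
	"fmt"
	"io"
	"net/http"
)

// fetch shows the pattern: the request is bound to ctx, so the HTTP
// round trip is cancelled together with the caller.
func fetch(ctx context.Context, url string) ([]byte, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, http.NoBody)
	if err != nil {
		return nil, err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return io.ReadAll(resp.Body)
}
```

diff --git a/cmd/clusterctl/client/config/reader_viper_test.go b/cmd/clusterctl/client/config/reader_viper_test.go index 8e0b5b209821..b5f675416177 100644 --- a/cmd/clusterctl/client/config/reader_viper_test.go +++ b/cmd/clusterctl/client/config/reader_viper_test.go @@ -17,6 +17,7 @@ limitations under the License. package config import ( + "context" "fmt" "net/http" "net/http/httptest" @@ -34,11 +35,11 @@ func Test_viperReader_Init(t *testing.T) { // Change HOME dir and do not specify config file // (.cluster-api/clusterctl) in it.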
clusterctlHomeDir, err := os.MkdirTemp("", "clusterctl-default") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) defer os.RemoveAll(clusterctlHomeDir) dir, err := os.MkdirTemp("", "clusterctl") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) defer os.RemoveAll(dir) configFile := filepath.Join(dir, "clusterctl.yaml") @@ -48,15 +49,15 @@ func Test_viperReader_Init(t *testing.T) { g.Expect(os.WriteFile(configFileBadContents, []byte("bad-contents"), 0600)).To(Succeed()) // To test the remote config file - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "text/plain") _, err := w.Write([]byte("bar: bar")) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) })) defer ts.Close() // To test the remote config file when fails to fetch - tsFail := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + tsFail := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusNotFound) })) defer tsFail.Close() @@ -108,12 +109,15 @@ func Test_viperReader_Init(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { gg := NewWithT(t) - v := newViperReader(injectConfigPaths(tt.configDirs)) + + ctx := context.Background() + + v, _ := newViperReader(injectConfigPaths(tt.configDirs)) if tt.expectErr { - gg.Expect(v.Init(tt.configPath)).ToNot(Succeed()) + gg.Expect(v.Init(ctx, tt.configPath)).ToNot(Succeed()) return } - gg.Expect(v.Init(tt.configPath)).To(Succeed()) + gg.Expect(v.Init(ctx, tt.configPath)).To(Succeed()) }) } } @@ -122,7 +126,7 @@ func Test_viperReader_Get(t *testing.T) { g := NewWithT(t) dir, err := os.MkdirTemp("", "clusterctl") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) defer os.RemoveAll(dir) _ = os.Setenv("FOO", "foo") @@ -168,9 +172,11 @@ func Test_viperReader_Get(t *testing.T) { t.Run(tt.name, func(t *testing.T) { gs := NewWithT(t) - v := newViperReader(injectConfigPaths([]string{dir})) + ctx := context.Background() + + v, _ := newViperReader(injectConfigPaths([]string{dir})) - gs.Expect(v.Init(configFile)).To(Succeed()) + gs.Expect(v.Init(ctx, configFile)).To(Succeed()) got, err := v.Get(tt.args.key) if tt.wantErr { @@ -178,7 +184,7 @@ func Test_viperReader_Get(t *testing.T) { return } - gs.Expect(err).NotTo(HaveOccurred()) + gs.Expect(err).ToNot(HaveOccurred()) gs.Expect(got).To(Equal(tt.want)) }) } @@ -186,17 +192,21 @@ func Test_viperReader_Get(t *testing.T) { func Test_viperReader_GetWithoutDefaultConfig(t *testing.T) { g := NewWithT(t) + + ctx := context.Background() + dir, err := os.MkdirTemp("", "clusterctl") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) defer os.RemoveAll(dir) _ = os.Setenv("FOO_FOO", "bar") - v := newViperReader(injectConfigPaths([]string{dir})) - g.Expect(v.Init("")).To(Succeed()) + v, err := newViperReader(injectConfigPaths([]string{dir})) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(v.Init(ctx, "")).To(Succeed()) got, err := v.Get("FOO_FOO") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(Equal("bar")) } @@ -204,7 +214,7 @@ func Test_viperReader_Set(t *testing.T) { g := NewWithT(t) dir, err := os.MkdirTemp("", "clusterctl") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) defer 
os.RemoveAll(dir) _ = os.Setenv("FOO", "foo") @@ -235,14 +245,16 @@ func Test_viperReader_Set(t *testing.T) { t.Run(tt.name, func(t *testing.T) { gs := NewWithT(t) + ctx := context.Background() + v := &viperReader{} - gs.Expect(v.Init(configFile)).To(Succeed()) + gs.Expect(v.Init(ctx, configFile)).To(Succeed()) v.Set(tt.args.key, tt.args.value) got, err := v.Get(tt.args.key) - gs.Expect(err).NotTo(HaveOccurred()) + gs.Expect(err).ToNot(HaveOccurred()) gs.Expect(got).To(Equal(tt.want)) }) } @@ -251,7 +263,7 @@ func Test_viperReader_Set(t *testing.T) { func Test_viperReader_checkDefaultConfig(t *testing.T) { g := NewWithT(t) dir, err := os.MkdirTemp("", "clusterctl") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) defer os.RemoveAll(dir) dir = strings.TrimSuffix(dir, "/") diff --git a/cmd/clusterctl/client/config/variables_client_test.go b/cmd/clusterctl/client/config/variables_client_test.go index e7fe36efb28d..f51d06cac92d 100644 --- a/cmd/clusterctl/client/config/variables_client_test.go +++ b/cmd/clusterctl/client/config/variables_client_test.go @@ -61,7 +61,6 @@ func Test_variables_Get(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - p := &variablesClient{ reader: reader, } @@ -71,7 +70,7 @@ func Test_variables_Get(t *testing.T) { return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(Equal(tt.want)) }) } diff --git a/cmd/clusterctl/client/config_test.go b/cmd/clusterctl/client/config_test.go index 690ab55af094..37a72bfb02dd 100644 --- a/cmd/clusterctl/client/config_test.go +++ b/cmd/clusterctl/client/config_test.go @@ -17,6 +17,7 @@ limitations under the License. package client import ( + "context" "fmt" "os" "path/filepath" @@ -27,7 +28,7 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster" @@ -51,19 +52,26 @@ func Test_clusterctlClient_GetProvidersConfig(t *testing.T) { { name: "Returns default providers", field: field{ - client: newFakeClient(newFakeConfig()), + client: newFakeClient(context.Background(), newFakeConfig(context.Background())), }, // note: these will be sorted by name by the Providers() call, so be sure they are in alphabetical order here too wantProviders: []string{ config.ClusterAPIProviderName, + config.K0smotronBootstrapProviderName, config.KubeadmBootstrapProviderName, config.KubeKeyK3sBootstrapProviderName, config.MicroK8sBootstrapProviderName, + config.OracleCloudNativeBootstrapProviderName, + config.RKE2BootstrapProviderName, config.TalosBootstrapProviderName, + config.K0smotronControlPlaneProviderName, + config.KamajiControlPlaneProviderName, config.KubeadmControlPlaneProviderName, config.KubeKeyK3sControlPlaneProviderName, config.MicroK8sControlPlaneProviderName, config.NestedControlPlaneProviderName, + config.OracleCloudNativeControlPlaneProviderName, + config.RKE2ControlPlaneProviderName, config.TalosControlPlaneProviderName, config.AWSProviderName, config.AzureProviderName, @@ -74,9 +82,13 @@ func Test_clusterctlClient_GetProvidersConfig(t *testing.T) { config.DockerProviderName, config.GCPProviderName, config.HetznerProviderName, + config.HivelocityProviderName, config.IBMCloudProviderName, + config.InMemoryProviderName, + config.K0smotronProviderName, config.KubeKeyProviderName, config.KubevirtProviderName, + 
config.LinodeProviderName, config.MAASProviderName, config.Metal3ProviderName, config.NestedProviderName, @@ -85,31 +97,42 @@ func Test_clusterctlClient_GetProvidersConfig(t *testing.T) { config.OpenStackProviderName, config.OutscaleProviderName, config.PacketProviderName, + config.ProxmoxProviderName, config.SideroProviderName, + config.TinkerbellProviderName, config.VCloudDirectorProviderName, config.VclusterProviderName, config.VirtinkProviderName, config.VSphereProviderName, + config.InClusterIPAMProviderName, + config.HelmAddonProviderName, }, wantErr: false, }, { name: "Returns default providers and custom providers if defined", field: field{ - client: newFakeClient(newFakeConfig().WithProvider(customProviderConfig)), + client: newFakeClient(context.Background(), newFakeConfig(context.Background()).WithProvider(customProviderConfig)), }, // note: these will be sorted by name by the Providers() call, so be sure they are in alphabetical order here too wantProviders: []string{ config.ClusterAPIProviderName, customProviderConfig.Name(), + config.K0smotronBootstrapProviderName, config.KubeadmBootstrapProviderName, config.KubeKeyK3sBootstrapProviderName, config.MicroK8sBootstrapProviderName, + config.OracleCloudNativeBootstrapProviderName, + config.RKE2BootstrapProviderName, config.TalosBootstrapProviderName, + config.K0smotronControlPlaneProviderName, + config.KamajiControlPlaneProviderName, config.KubeadmControlPlaneProviderName, config.KubeKeyK3sControlPlaneProviderName, config.MicroK8sControlPlaneProviderName, config.NestedControlPlaneProviderName, + config.OracleCloudNativeControlPlaneProviderName, + config.RKE2ControlPlaneProviderName, config.TalosControlPlaneProviderName, config.AWSProviderName, config.AzureProviderName, @@ -120,9 +143,13 @@ func Test_clusterctlClient_GetProvidersConfig(t *testing.T) { config.DockerProviderName, config.GCPProviderName, config.HetznerProviderName, + config.HivelocityProviderName, config.IBMCloudProviderName, + config.InMemoryProviderName, + config.K0smotronProviderName, config.KubeKeyProviderName, config.KubevirtProviderName, + config.LinodeProviderName, config.MAASProviderName, config.Metal3ProviderName, config.NestedProviderName, @@ -131,11 +158,15 @@ func Test_clusterctlClient_GetProvidersConfig(t *testing.T) { config.OpenStackProviderName, config.OutscaleProviderName, config.PacketProviderName, + config.ProxmoxProviderName, config.SideroProviderName, + config.TinkerbellProviderName, config.VCloudDirectorProviderName, config.VclusterProviderName, config.VirtinkProviderName, config.VSphereProviderName, + config.InClusterIPAMProviderName, + config.HelmAddonProviderName, }, wantErr: false, }, @@ -150,7 +181,7 @@ func Test_clusterctlClient_GetProvidersConfig(t *testing.T) { return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(HaveLen(len(tt.wantProviders))) for i, gotProvider := range got { @@ -162,15 +193,17 @@ func Test_clusterctlClient_GetProvidersConfig(t *testing.T) { } func Test_clusterctlClient_GetProviderComponents(t *testing.T) { - config1 := newFakeConfig(). + ctx := context.Background() + + config1 := newFakeConfig(ctx). WithProvider(capiProviderConfig) - repository1 := newFakeRepository(capiProviderConfig, config1). + repository1 := newFakeRepository(ctx, capiProviderConfig, config1). WithPaths("root", "components.yaml"). WithDefaultVersion("v1.0.0"). WithFile("v1.0.0", "components.yaml", componentsYAML("ns1")) - client := newFakeClient(config1). + client := newFakeClient(ctx, config1). 
WithRepository(repository1) type args struct { @@ -212,15 +245,17 @@ func Test_clusterctlClient_GetProviderComponents(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + options := ComponentsOptions{ TargetNamespace: tt.args.targetNameSpace, } - got, err := client.GetProviderComponents(tt.args.provider, capiProviderConfig.Type(), options) + got, err := client.GetProviderComponents(ctx, tt.args.provider, capiProviderConfig.Type(), options) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got.Name()).To(Equal(tt.want.provider.Name())) g.Expect(got.Version()).To(Equal(tt.want.version)) @@ -231,13 +266,15 @@ func Test_clusterctlClient_GetProviderComponents(t *testing.T) { func Test_getComponentsByName_withEmptyVariables(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + // Create a fake config with a provider named P1 and a variable named foo. repository1Config := config.NewProvider("p1", "url", clusterctlv1.InfrastructureProviderType) - config1 := newFakeConfig(). + config1 := newFakeConfig(ctx). WithProvider(repository1Config) - repository1 := newFakeRepository(repository1Config, config1). + repository1 := newFakeRepository(ctx, repository1Config, config1). WithPaths("root", "components.yaml"). WithDefaultVersion("v1.0.0"). WithFile("v1.0.0", "components.yaml", componentsYAML("${FOO}")). @@ -252,7 +289,7 @@ func Test_getComponentsByName_withEmptyVariables(t *testing.T) { // Create a new fakeClient that allows to execute tests on the fake config, // the fake repositories and the fake cluster. - client := newFakeClient(config1). + client := newFakeClient(ctx, config1). WithRepository(repository1). WithCluster(cluster1) @@ -260,8 +297,8 @@ func Test_getComponentsByName_withEmptyVariables(t *testing.T) { TargetNamespace: "ns1", SkipTemplateProcess: true, } - components, err := client.GetProviderComponents(repository1Config.Name(), repository1Config.Type(), options) - g.Expect(err).NotTo(HaveOccurred()) + components, err := client.GetProviderComponents(ctx, repository1Config.Name(), repository1Config.Type(), options) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(components.Variables()).To(HaveLen(1)) g.Expect(components.Name()).To(Equal("p1")) } @@ -283,8 +320,8 @@ func Test_clusterctlClient_templateOptionsToVariables(t *testing.T) { ClusterName: "foo", TargetNamespace: "bar", KubernetesVersion: "v1.2.3", - ControlPlaneMachineCount: pointer.Int64(1), - WorkerMachineCount: pointer.Int64(2), + ControlPlaneMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](2), }, }, wantVars: map[string]string{ @@ -303,8 +340,8 @@ func Test_clusterctlClient_templateOptionsToVariables(t *testing.T) { ClusterName: "foo", TargetNamespace: "bar", KubernetesVersion: "", // empty means to use value from env variables/config file - ControlPlaneMachineCount: pointer.Int64(1), - WorkerMachineCount: pointer.Int64(2), + ControlPlaneMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](2), }, }, wantVars: map[string]string{ @@ -381,7 +418,7 @@ func Test_clusterctlClient_templateOptionsToVariables(t *testing.T) { ClusterName: "foo", TargetNamespace: "bar", KubernetesVersion: "v1.2.3", - ControlPlaneMachineCount: pointer.Int64(-1), + ControlPlaneMachineCount: ptr.To[int64](-1), }, }, wantErr: true, @@ -393,8 +430,8 @@ func Test_clusterctlClient_templateOptionsToVariables(t *testing.T) { ClusterName: "foo", TargetNamespace: "bar", 
KubernetesVersion: "v1.2.3", - ControlPlaneMachineCount: pointer.Int64(1), - WorkerMachineCount: pointer.Int64(-1), + ControlPlaneMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](-1), }, }, wantErr: true, @@ -404,7 +441,9 @@ func Test_clusterctlClient_templateOptionsToVariables(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - config := newFakeConfig(). + ctx := context.Background() + + config := newFakeConfig(ctx). WithVar("KUBERNETES_VERSION", "v3.4.5") // with this line we are simulating an env var c := &clusterctlClient{ @@ -415,11 +454,11 @@ func Test_clusterctlClient_templateOptionsToVariables(t *testing.T) { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) for name, wantValue := range tt.wantVars { gotValue, err := config.Variables().Get(name) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(gotValue).To(Equal(wantValue)) } }) @@ -427,7 +466,9 @@ func Test_clusterctlClient_templateOptionsToVariables(t *testing.T) { } func Test_clusterctlClient_templateOptionsToVariables_withExistingMachineCountVariables(t *testing.T) { - configClient := newFakeConfig(). + ctx := context.Background() + + configClient := newFakeConfig(ctx). WithVar("CONTROL_PLANE_MACHINE_COUNT", "3"). WithVar("WORKER_MACHINE_COUNT", "10") @@ -466,11 +507,13 @@ func Test_clusterctlClient_templateOptionsToVariables_withExistingMachineCountVa func Test_clusterctlClient_GetClusterTemplate(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + rawTemplate := templateYAML("ns3", "${ CLUSTER_NAME }") // Template on a file tmpDir, err := os.MkdirTemp("", "cc") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) defer os.RemoveAll(tmpDir) path := filepath.Join(tmpDir, "cluster-template.yaml") @@ -491,10 +534,10 @@ func Test_clusterctlClient_GetClusterTemplate(t *testing.T) { }, } - config1 := newFakeConfig(). + config1 := newFakeConfig(ctx). WithProvider(infraProviderConfig) - repository1 := newFakeRepository(infraProviderConfig, config1). + repository1 := newFakeRepository(ctx, infraProviderConfig, config1). WithPaths("root", "components"). WithDefaultVersion("v3.0.0"). WithFile("v3.0.0", "cluster-template.yaml", rawTemplate) @@ -504,7 +547,7 @@ func Test_clusterctlClient_GetClusterTemplate(t *testing.T) { WithObjs(configMap). WithObjs(test.FakeCAPISetupObjects()...) - client := newFakeClient(config1). + client := newFakeClient(ctx, config1). WithCluster(cluster1). 
WithRepository(repository1) @@ -535,7 +578,7 @@ func Test_clusterctlClient_GetClusterTemplate(t *testing.T) { }, ClusterName: "test", TargetNamespace: "ns1", - ControlPlaneMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), }, }, want: templateValues{ @@ -555,7 +598,7 @@ func Test_clusterctlClient_GetClusterTemplate(t *testing.T) { }, ClusterName: "test", TargetNamespace: "ns1", - ControlPlaneMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), }, }, want: templateValues{ @@ -575,7 +618,7 @@ func Test_clusterctlClient_GetClusterTemplate(t *testing.T) { }, ClusterName: "test", TargetNamespace: "", // empty triggers usage of the current namespace - ControlPlaneMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), }, }, want: templateValues{ @@ -594,7 +637,7 @@ func Test_clusterctlClient_GetClusterTemplate(t *testing.T) { }, ClusterName: "test", TargetNamespace: "ns1", - ControlPlaneMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), }, }, want: templateValues{ @@ -615,7 +658,7 @@ func Test_clusterctlClient_GetClusterTemplate(t *testing.T) { }, ClusterName: "test", TargetNamespace: "ns1", - ControlPlaneMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), }, }, want: templateValues{ @@ -629,18 +672,18 @@ func Test_clusterctlClient_GetClusterTemplate(t *testing.T) { t.Run(tt.name, func(t *testing.T) { gs := NewWithT(t) - got, err := client.GetClusterTemplate(tt.args.options) + got, err := client.GetClusterTemplate(ctx, tt.args.options) if tt.wantErr { gs.Expect(err).To(HaveOccurred()) return } - gs.Expect(err).NotTo(HaveOccurred()) + gs.Expect(err).ToNot(HaveOccurred()) gs.Expect(got.Variables()).To(Equal(tt.want.variables)) gs.Expect(got.TargetNamespace()).To(Equal(tt.want.targetNamespace)) gotYaml, err := got.Yaml() - gs.Expect(err).NotTo(HaveOccurred()) + gs.Expect(err).ToNot(HaveOccurred()) gs.Expect(gotYaml).To(Equal(tt.want.yaml)) }) } @@ -649,11 +692,13 @@ func Test_clusterctlClient_GetClusterTemplate(t *testing.T) { func Test_clusterctlClient_GetClusterTemplate_withClusterClass(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + rawTemplate := mangedTopologyTemplateYAML("ns4", "${CLUSTER_NAME}", "dev") rawClusterClassTemplate := clusterClassYAML("ns4", "dev") - config1 := newFakeConfig().WithProvider(infraProviderConfig) + config1 := newFakeConfig(ctx).WithProvider(infraProviderConfig) - repository1 := newFakeRepository(infraProviderConfig, config1). + repository1 := newFakeRepository(ctx, infraProviderConfig, config1). WithPaths("root", "components"). WithDefaultVersion("v3.0.0"). WithFile("v3.0.0", "cluster-template-dev.yaml", rawTemplate). @@ -663,12 +708,12 @@ func Test_clusterctlClient_GetClusterTemplate_withClusterClass(t *testing.T) { WithProviderInventory(infraProviderConfig.Name(), infraProviderConfig.Type(), "v3.0.0", "ns4"). WithObjs(test.FakeCAPISetupObjects()...) - client := newFakeClient(config1). + client := newFakeClient(ctx, config1). WithCluster(cluster1). 
WithRepository(repository1) // Assert output - got, err := client.GetClusterTemplate(GetClusterTemplateOptions{ + got, err := client.GetClusterTemplate(ctx, GetClusterTemplateOptions{ Kubeconfig: Kubeconfig{Path: "kubeconfig", Context: "mgmt-context"}, ClusterName: "test", TargetNamespace: "ns1", @@ -676,7 +721,7 @@ func Test_clusterctlClient_GetClusterTemplate_withClusterClass(t *testing.T) { Flavor: "dev", }, }) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got.Variables()).To(Equal([]string{"CLUSTER_NAME"})) g.Expect(got.TargetNamespace()).To(Equal("ns1")) g.Expect(got.Objs()).To(ContainElement(MatchClusterClass("dev", "ns1"))) @@ -688,7 +733,7 @@ func Test_clusterctlClient_GetClusterTemplate_onEmptyCluster(t *testing.T) { // Template on a file tmpDir, err := os.MkdirTemp("", "cc") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) defer os.RemoveAll(tmpDir) path := filepath.Join(tmpDir, "cluster-template.yaml") @@ -709,18 +754,18 @@ func Test_clusterctlClient_GetClusterTemplate_onEmptyCluster(t *testing.T) { }, } - config1 := newFakeConfig(). + config1 := newFakeConfig(ctx). WithProvider(infraProviderConfig) cluster1 := newFakeCluster(cluster.Kubeconfig{Path: "kubeconfig", Context: "mgmt-context"}, config1). WithObjs(configMap) - repository1 := newFakeRepository(infraProviderConfig, config1). + repository1 := newFakeRepository(ctx, infraProviderConfig, config1). WithPaths("root", "components"). WithDefaultVersion("v3.0.0"). WithFile("v3.0.0", "cluster-template.yaml", rawTemplate) - client := newFakeClient(config1). + client := newFakeClient(ctx, config1). WithCluster(cluster1). WithRepository(repository1) @@ -751,7 +796,7 @@ func Test_clusterctlClient_GetClusterTemplate_onEmptyCluster(t *testing.T) { }, ClusterName: "test", TargetNamespace: "ns1", - ControlPlaneMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), }, }, want: templateValues{ @@ -771,7 +816,7 @@ func Test_clusterctlClient_GetClusterTemplate_onEmptyCluster(t *testing.T) { }, ClusterName: "test", TargetNamespace: "ns1", - ControlPlaneMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), }, }, wantErr: true, @@ -786,7 +831,7 @@ func Test_clusterctlClient_GetClusterTemplate_onEmptyCluster(t *testing.T) { }, ClusterName: "test", TargetNamespace: "ns1", - ControlPlaneMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), }, }, want: templateValues{ @@ -807,7 +852,7 @@ func Test_clusterctlClient_GetClusterTemplate_onEmptyCluster(t *testing.T) { }, ClusterName: "test", TargetNamespace: "ns1", - ControlPlaneMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), }, }, want: templateValues{ @@ -821,18 +866,18 @@ func Test_clusterctlClient_GetClusterTemplate_onEmptyCluster(t *testing.T) { t.Run(tt.name, func(t *testing.T) { gs := NewWithT(t) - got, err := client.GetClusterTemplate(tt.args.options) + got, err := client.GetClusterTemplate(ctx, tt.args.options) if tt.wantErr { gs.Expect(err).To(HaveOccurred()) return } - gs.Expect(err).NotTo(HaveOccurred()) + gs.Expect(err).ToNot(HaveOccurred()) gs.Expect(got.Variables()).To(Equal(tt.want.variables)) gs.Expect(got.TargetNamespace()).To(Equal(tt.want.targetNamespace)) gotYaml, err := got.Yaml() - gs.Expect(err).NotTo(HaveOccurred()) + gs.Expect(err).ToNot(HaveOccurred()) gs.Expect(gotYaml).To(Equal(tt.want.yaml)) }) } @@ -845,9 +890,9 @@ func newFakeClientWithoutCluster(configClient config.Client) *fakeClient { } var err error - 
fake.internalClient, err = newClusterctlClient("fake-config", + fake.internalClient, err = newClusterctlClient(context.Background(), "fake-config", InjectConfig(fake.configClient), - InjectRepositoryFactory(func(input RepositoryClientFactoryInput) (repository.Client, error) { + InjectRepositoryFactory(func(_ context.Context, input RepositoryClientFactoryInput) (repository.Client, error) { if _, ok := fake.repositories[input.Provider.ManifestLabel()]; !ok { return nil, errors.Errorf("repository for kubeconfig %q does not exist", input.Provider.ManifestLabel()) } @@ -864,10 +909,12 @@ func newFakeClientWithoutCluster(configClient config.Client) *fakeClient { func Test_clusterctlClient_GetClusterTemplate_withoutCluster(t *testing.T) { rawTemplate := templateYAML("ns3", "${ CLUSTER_NAME }") - config1 := newFakeConfig(). + ctx := context.Background() + + config1 := newFakeConfig(ctx). WithProvider(infraProviderConfig) - repository1 := newFakeRepository(infraProviderConfig, config1). + repository1 := newFakeRepository(ctx, infraProviderConfig, config1). WithPaths("root", "components"). WithDefaultVersion("v3.0.0"). WithFile("v3.0.0", "cluster-template.yaml", rawTemplate) @@ -902,7 +949,7 @@ func Test_clusterctlClient_GetClusterTemplate_withoutCluster(t *testing.T) { }, ClusterName: "test", TargetNamespace: "ns1", - ControlPlaneMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), }, }, want: templateValues{ @@ -922,7 +969,7 @@ func Test_clusterctlClient_GetClusterTemplate_withoutCluster(t *testing.T) { }, ClusterName: "test", TargetNamespace: "ns1", - ControlPlaneMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), }, }, wantErr: true, @@ -932,18 +979,18 @@ func Test_clusterctlClient_GetClusterTemplate_withoutCluster(t *testing.T) { t.Run(tt.name, func(t *testing.T) { gs := NewWithT(t) - got, err := client.GetClusterTemplate(tt.args.options) + got, err := client.GetClusterTemplate(ctx, tt.args.options) if tt.wantErr { gs.Expect(err).To(HaveOccurred()) return } - gs.Expect(err).NotTo(HaveOccurred()) + gs.Expect(err).ToNot(HaveOccurred()) gs.Expect(got.Variables()).To(Equal(tt.want.variables)) gs.Expect(got.TargetNamespace()).To(Equal(tt.want.targetNamespace)) gotYaml, err := got.Yaml() - gs.Expect(err).NotTo(HaveOccurred()) + gs.Expect(err).ToNot(HaveOccurred()) gs.Expect(gotYaml).To(Equal(tt.want.yaml)) }) } @@ -955,7 +1002,7 @@ func Test_clusterctlClient_ProcessYAML(t *testing.T) { v2: ${VAR2=default2} v3: ${VAR3:-default3}` dir, err := os.MkdirTemp("", "clusterctl") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) defer os.RemoveAll(dir) templateFile := filepath.Join(dir, "template.yaml") @@ -1028,14 +1075,14 @@ v3: default3`, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - config1 := newFakeConfig(). + t.Run(tt.name, func(*testing.T) { + config1 := newFakeConfig(ctx). WithProvider(infraProviderConfig) cluster1 := newFakeCluster(cluster.Kubeconfig{}, config1) - client := newFakeClient(config1).WithCluster(cluster1) + client := newFakeClient(ctx, config1).WithCluster(cluster1) - printer, err := client.ProcessYAML(tt.options) + printer, err := client.ProcessYAML(ctx, tt.options) if tt.expectErr { g.Expect(err).To(HaveOccurred()) return diff --git a/cmd/clusterctl/client/delete.go b/cmd/clusterctl/client/delete.go index 77ee268a3b15..68b124b48668 100644 --- a/cmd/clusterctl/client/delete.go +++ b/cmd/clusterctl/client/delete.go @@ -17,8 +17,11 @@ limitations under the License. 
package client import ( + "context" + "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kerrors "k8s.io/apimachinery/pkg/util/errors" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster" @@ -48,6 +51,9 @@ type DeleteOptions struct { // RuntimeExtensionProviders and versions (e.g. test:v0.0.1) to delete from the management cluster. RuntimeExtensionProviders []string + // AddonProviders and versions (e.g. helm:v0.1.0) to delete from the management cluster. + AddonProviders []string + // DeleteAll set for deletion of all the providers. DeleteAll bool @@ -62,24 +68,24 @@ type DeleteOptions struct { SkipInventory bool } -func (c *clusterctlClient) Delete(options DeleteOptions) error { +func (c *clusterctlClient) Delete(ctx context.Context, options DeleteOptions) error { clusterClient, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: options.Kubeconfig}) if err != nil { return err } // Ensure this command only runs against management clusters with the current Cluster API contract. - if err := clusterClient.ProviderInventory().CheckCAPIContract(); err != nil { + if err := clusterClient.ProviderInventory().CheckCAPIContract(ctx); err != nil { return err } // Ensure the custom resource definitions required by clusterctl are in place. - if err := clusterClient.ProviderInventory().EnsureCustomResourceDefinitions(); err != nil { + if err := clusterClient.ProviderInventory().EnsureCustomResourceDefinitions(ctx); err != nil { return err } // Get the list of installed providers. - installedProviders, err := clusterClient.ProviderInventory().List() + installedProviders, err := clusterClient.ProviderInventory().List(ctx) if err != nil { return err } @@ -122,9 +128,14 @@ func (c *clusterctlClient) Delete(options DeleteOptions) error { return err } + providers, err = appendProviders(providers, clusterctlv1.AddonProviderType, options.AddonProviders...) + if err != nil { + return err + } + for _, provider := range providers { // Try to detect the namespace where the provider lives - provider.Namespace, err = clusterClient.ProviderInventory().GetProviderNamespace(provider.ProviderName, provider.GetProviderType()) + provider.Namespace, err = clusterClient.ProviderInventory().GetProviderNamespace(ctx, provider.ProviderName, provider.GetProviderType()) if err != nil { return err } @@ -133,7 +144,7 @@ func (c *clusterctlClient) Delete(options DeleteOptions) error { } if provider.Version != "" { - version, err := clusterClient.ProviderInventory().GetProviderVersion(provider.ProviderName, provider.GetProviderType()) + version, err := clusterClient.ProviderInventory().GetProviderVersion(ctx, provider.ProviderName, provider.GetProviderType()) if err != nil { return err } @@ -146,9 +157,22 @@ func (c *clusterctlClient) Delete(options DeleteOptions) error { } } + if options.IncludeCRDs { + errList := []error{} + for _, provider := range providersToDelete { + err = clusterClient.ProviderComponents().ValidateNoObjectsExist(ctx, provider) + if err != nil { + errList = append(errList, err) + } + } + if len(errList) > 0 { + return kerrors.NewAggregate(errList) + } + } + // Delete the selected providers. 
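The new options wired in above can be exercised from the library as follows. A hedged sketch, assuming the exported client.New constructor follows the same context-threading pattern as the internals in this PR:

```go
package main

import (
	"context"

	"sigs.k8s.io/cluster-api/cmd/clusterctl/client"
)

func main() {
	ctx := context.Background()

	c, err := client.New(ctx, "") // "" falls back to the default config lookup
	if err != nil {
		panic(err)
	}

	// With IncludeCRDs set, Delete first runs ValidateNoObjectsExist for
	// every selected provider and aggregates the failures, instead of
	// bailing out on the first provider that still owns objects.
	if err := c.Delete(ctx, client.DeleteOptions{
		Kubeconfig:     client.Kubeconfig{Path: "kubeconfig", Context: "mgmt-context"},
		AddonProviders: []string{"helm"},
		IncludeCRDs:    true,
	}); err != nil {
		panic(err)
	}
}
```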
for _, provider := range providersToDelete { - if err := clusterClient.ProviderComponents().Delete(cluster.DeleteOptions{Provider: provider, IncludeNamespace: options.IncludeNamespace, IncludeCRDs: options.IncludeCRDs, SkipInventory: options.SkipInventory}); err != nil { + if err := clusterClient.ProviderComponents().Delete(ctx, cluster.DeleteOptions{Provider: provider, IncludeNamespace: options.IncludeNamespace, IncludeCRDs: options.IncludeCRDs, SkipInventory: options.SkipInventory}); err != nil { return err } } diff --git a/cmd/clusterctl/client/delete_test.go b/cmd/clusterctl/client/delete_test.go index 1cac4abea760..91fb05e4d76f 100644 --- a/cmd/clusterctl/client/delete_test.go +++ b/cmd/clusterctl/client/delete_test.go @@ -17,6 +17,7 @@ limitations under the License. package client import ( + "context" "testing" . "github.com/onsi/gomega" @@ -66,6 +67,27 @@ func Test_clusterctlClient_Delete(t *testing.T) { wantProviders: sets.Set[string]{}, wantErr: false, }, + { + name: "Delete all the providers including CRDs", + fields: fields{ + client: fakeClusterForDelete(), + }, + args: args{ + options: DeleteOptions{ + Kubeconfig: Kubeconfig{Path: "kubeconfig", Context: "mgmt-context"}, + IncludeNamespace: false, + IncludeCRDs: true, + SkipInventory: false, + CoreProvider: "", + BootstrapProviders: nil, + InfrastructureProviders: nil, + ControlPlaneProviders: nil, + DeleteAll: true, // delete all the providers + }, + }, + wantProviders: sets.Set[string]{}, + wantErr: false, + }, { name: "Delete single provider auto-detect namespace", fields: fields{ @@ -167,19 +189,21 @@ func Test_clusterctlClient_Delete(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - err := tt.fields.client.Delete(tt.args.options) + ctx := context.Background() + + err := tt.fields.client.Delete(ctx, tt.args.options) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) input := cluster.Kubeconfig(tt.args.options.Kubeconfig) proxy := tt.fields.client.clusters[input].Proxy() gotProviders := &clusterctlv1.ProviderList{} - c, err := proxy.NewClient() - g.Expect(err).NotTo(HaveOccurred()) + c, err := proxy.NewClient(ctx) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(c.List(ctx, gotProviders)).To(Succeed()) gotProvidersSet := sets.Set[string]{} @@ -187,24 +211,26 @@ func Test_clusterctlClient_Delete(t *testing.T) { gotProvidersSet.Insert(gotProvider.Name) } - g.Expect(gotProvidersSet).To(Equal(tt.wantProviders)) + g.Expect(gotProvidersSet).To(BeComparableTo(tt.wantProviders)) }) } } // clusterctl client for a management cluster with capi and bootstrap provider. func fakeClusterForDelete() *fakeClient { - config1 := newFakeConfig(). + ctx := context.Background() + + config1 := newFakeConfig(ctx). WithVar("var", "value"). WithProvider(capiProviderConfig). WithProvider(bootstrapProviderConfig). WithProvider(controlPlaneProviderConfig). 
WithProvider(infraProviderConfig) - repository1 := newFakeRepository(capiProviderConfig, config1) - repository2 := newFakeRepository(bootstrapProviderConfig, config1) - repository3 := newFakeRepository(controlPlaneProviderConfig, config1) - repository4 := newFakeRepository(infraProviderConfig, config1) + repository1 := newFakeRepository(ctx, capiProviderConfig, config1) + repository2 := newFakeRepository(ctx, bootstrapProviderConfig, config1) + repository3 := newFakeRepository(ctx, controlPlaneProviderConfig, config1) + repository4 := newFakeRepository(ctx, infraProviderConfig, config1) cluster1 := newFakeCluster(cluster.Kubeconfig{Path: "kubeconfig", Context: "mgmt-context"}, config1) cluster1.fakeProxy.WithProviderInventory(capiProviderConfig.Name(), capiProviderConfig.Type(), providerVersion, "capi-system") @@ -213,7 +239,7 @@ func fakeClusterForDelete() *fakeClient { cluster1.fakeProxy.WithProviderInventory(infraProviderConfig.Name(), infraProviderConfig.Type(), providerVersion, namespace) cluster1.fakeProxy.WithFakeCAPISetup() - client := newFakeClient(config1). + client := newFakeClient(ctx, config1). // fake repository for capi, bootstrap, controlplane and infra provider (matching provider's config) WithRepository(repository1). WithRepository(repository2). diff --git a/cmd/clusterctl/client/describe.go b/cmd/clusterctl/client/describe.go index 2bdf1464537d..2e98ef0325a3 100644 --- a/cmd/clusterctl/client/describe.go +++ b/cmd/clusterctl/client/describe.go @@ -60,7 +60,7 @@ type DescribeClusterOptions struct { } // DescribeCluster returns the object tree representing the status of a Cluster API cluster. -func (c *clusterctlClient) DescribeCluster(options DescribeClusterOptions) (*tree.ObjectTree, error) { +func (c *clusterctlClient) DescribeCluster(ctx context.Context, options DescribeClusterOptions) (*tree.ObjectTree, error) { // gets access to the management cluster cluster, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: options.Kubeconfig}) if err != nil { @@ -68,7 +68,7 @@ func (c *clusterctlClient) DescribeCluster(options DescribeClusterOptions) (*tre } // Ensure this command only runs against management clusters with the current Cluster API contract. - if err := cluster.ProviderInventory().CheckCAPIContract(); err != nil { + if err := cluster.ProviderInventory().CheckCAPIContract(ctx); err != nil { return nil, err } @@ -82,13 +82,13 @@ func (c *clusterctlClient) DescribeCluster(options DescribeClusterOptions) (*tre } // Fetch the Cluster client. - client, err := cluster.Proxy().NewClient() + client, err := cluster.Proxy().NewClient(ctx) if err != nil { return nil, err } // Gets the object tree representing the status of a Cluster API cluster. - return tree.Discovery(context.TODO(), client, options.Namespace, options.ClusterName, tree.DiscoverOptions{ + return tree.Discovery(ctx, client, options.Namespace, options.ClusterName, tree.DiscoverOptions{ ShowOtherConditions: options.ShowOtherConditions, ShowMachineSets: options.ShowMachineSets, ShowClusterResourceSets: options.ShowClusterResourceSets, diff --git a/cmd/clusterctl/client/generate_provider.go b/cmd/clusterctl/client/generate_provider.go index b9f24f8d8a31..a59c6aee0158 100644 --- a/cmd/clusterctl/client/generate_provider.go +++ b/cmd/clusterctl/client/generate_provider.go @@ -17,6 +17,8 @@ limitations under the License. 
package client import ( + "context" + "github.com/pkg/errors" "k8s.io/apimachinery/pkg/util/version" @@ -24,7 +26,7 @@ import ( clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" ) -func (c *clusterctlClient) GenerateProvider(provider string, providerType clusterctlv1.ProviderType, options ComponentsOptions) (Components, error) { +func (c *clusterctlClient) GenerateProvider(ctx context.Context, provider string, providerType clusterctlv1.ProviderType, options ComponentsOptions) (Components, error) { providerName, providerVersion, err := parseProviderName(provider) if err != nil { return nil, err @@ -35,7 +37,7 @@ func (c *clusterctlClient) GenerateProvider(provider string, providerType cluste return nil, err } - providerRepositoryClient, err := c.repositoryClientFactory(RepositoryClientFactoryInput{Provider: configRepository}) + providerRepositoryClient, err := c.repositoryClientFactory(ctx, RepositoryClientFactoryInput{Provider: configRepository}) if err != nil { return nil, err } @@ -44,7 +46,7 @@ func (c *clusterctlClient) GenerateProvider(provider string, providerType cluste providerVersion = providerRepositoryClient.DefaultVersion() } - latestMetadata, err := providerRepositoryClient.Metadata(providerVersion).Get() + latestMetadata, err := providerRepositoryClient.Metadata(providerVersion).Get(ctx) if err != nil { return nil, err } @@ -63,5 +65,5 @@ func (c *clusterctlClient) GenerateProvider(provider string, providerType cluste return nil, errors.Errorf("current version of clusterctl is only compatible with %s providers, detected %s for provider %s", clusterv1.GroupVersion.Version, releaseSeries.Contract, providerName) } - return c.GetProviderComponents(provider, providerType, options) + return c.GetProviderComponents(ctx, provider, providerType, options) } diff --git a/cmd/clusterctl/client/get_kubeconfig.go b/cmd/clusterctl/client/get_kubeconfig.go index 7f33402d7d8f..9b8c8eb991ee 100644 --- a/cmd/clusterctl/client/get_kubeconfig.go +++ b/cmd/clusterctl/client/get_kubeconfig.go @@ -17,6 +17,8 @@ limitations under the License. package client import ( + "context" + "github.com/pkg/errors" ) @@ -33,7 +35,7 @@ type GetKubeconfigOptions struct { WorkloadClusterName string } -func (c *clusterctlClient) GetKubeconfig(options GetKubeconfigOptions) (string, error) { +func (c *clusterctlClient) GetKubeconfig(ctx context.Context, options GetKubeconfigOptions) (string, error) { // gets access to the management cluster clusterClient, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: options.Kubeconfig}) if err != nil { @@ -41,7 +43,7 @@ func (c *clusterctlClient) GetKubeconfig(options GetKubeconfigOptions) (string, } // Ensure this command only runs against management clusters with the current Cluster API contract. 
- if err := clusterClient.ProviderInventory().CheckCAPIContract(); err != nil { + if err := clusterClient.ProviderInventory().CheckCAPIContract(ctx); err != nil { return "", err } @@ -56,5 +58,5 @@ func (c *clusterctlClient) GetKubeconfig(options GetKubeconfigOptions) (string, options.Namespace = currentNamespace } - return clusterClient.WorkloadCluster().GetKubeconfig(options.WorkloadClusterName, options.Namespace) + return clusterClient.WorkloadCluster().GetKubeconfig(ctx, options.WorkloadClusterName, options.Namespace) } diff --git a/cmd/clusterctl/client/get_kubeconfig_test.go b/cmd/clusterctl/client/get_kubeconfig_test.go index c3700803adfd..e9c58d289909 100644 --- a/cmd/clusterctl/client/get_kubeconfig_test.go +++ b/cmd/clusterctl/client/get_kubeconfig_test.go @@ -17,6 +17,7 @@ limitations under the License. package client import ( + "context" "testing" . "github.com/onsi/gomega" @@ -26,13 +27,15 @@ import ( ) func Test_clusterctlClient_GetKubeconfig(t *testing.T) { - configClient := newFakeConfig() + ctx := context.Background() + + configClient := newFakeConfig(ctx) kubeconfig := cluster.Kubeconfig{Path: "kubeconfig", Context: "mgmt-context"} clusterClient := newFakeCluster(cluster.Kubeconfig{Path: "cluster1"}, configClient) // create a clusterctl client where the proxy returns an empty namespace clusterClient.fakeProxy = test.NewFakeProxy().WithNamespace("").WithFakeCAPISetup() - badClient := newFakeClient(configClient).WithCluster(clusterClient) + badClient := newFakeClient(ctx, configClient).WithCluster(clusterClient) tests := []struct { name string @@ -57,7 +60,7 @@ func Test_clusterctlClient_GetKubeconfig(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - config, err := tt.client.GetKubeconfig(tt.options) + config, err := tt.client.GetKubeconfig(ctx, tt.options) if tt.expectErr { g.Expect(err).To(HaveOccurred()) return diff --git a/cmd/clusterctl/client/init.go b/cmd/clusterctl/client/init.go index aefeb8e7035f..562f21ee737c 100644 --- a/cmd/clusterctl/client/init.go +++ b/cmd/clusterctl/client/init.go @@ -17,6 +17,7 @@ limitations under the License. package client import ( + "context" "sort" "time" @@ -59,6 +60,9 @@ type InitOptions struct { // RuntimeExtensionProviders and versions (e.g. test:v0.0.1) to add to the management cluster. RuntimeExtensionProviders []string + // AddonProviders and versions (e.g. helm:v0.1.0) to add to the management cluster. + AddonProviders []string + // TargetNamespace defines the namespace where the providers should be deployed. If unspecified, each provider // will be installed in a provider's default namespace. TargetNamespace string @@ -86,9 +90,15 @@ type InitOptions struct { } // Init initializes a management cluster by adding the requested list of providers. -func (c *clusterctlClient) Init(options InitOptions) ([]Components, error) { +func (c *clusterctlClient) Init(ctx context.Context, options InitOptions) ([]Components, error) { log := logf.Log + // Default WaitProviderTimeout as we cannot rely on defaulting in the CLI + // when clusterctl is used as a library. 
+ if options.WaitProviderTimeout.Nanoseconds() == 0 { + options.WaitProviderTimeout = time.Duration(5*60) * time.Second + } + // gets access to the management cluster clusterClient, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: options.Kubeconfig}) if err != nil { @@ -96,12 +106,12 @@ func (c *clusterctlClient) Init(options InitOptions) ([]Components, error) { } // ensure the custom resource definitions required by clusterctl are in place - if err := clusterClient.ProviderInventory().EnsureCustomResourceDefinitions(); err != nil { + if err := clusterClient.ProviderInventory().EnsureCustomResourceDefinitions(ctx); err != nil { return nil, err } - // Ensure this command only runs against v1alpha4 management clusters - if err := clusterClient.ProviderInventory().CheckCAPIContract(cluster.AllowCAPINotInstalled{}); err != nil { + // Ensure this command only runs against v1beta1 management clusters + if err := clusterClient.ProviderInventory().CheckCAPIContract(ctx, cluster.AllowCAPINotInstalled{}); err != nil { return nil, err } @@ -109,11 +119,11 @@ func (c *clusterctlClient) Init(options InitOptions) ([]Components, error) { // if not we consider this the first time init is executed, and thus we enforce the installation of a core provider, // a bootstrap provider and a control-plane provider (if not already explicitly requested by the user) log.Info("Fetching providers") - firstRun := c.addDefaultProviders(clusterClient, &options) + firstRun := c.addDefaultProviders(ctx, clusterClient, &options) // create an installer service, add the requested providers to the install queue and then perform validation // of the target state of the management cluster before starting the installation. - installer, err := c.setupInstaller(clusterClient, options) + installer, err := c.setupInstaller(ctx, clusterClient, options) if err != nil { return nil, err } @@ -123,7 +133,7 @@ func (c *clusterctlClient) Init(options InitOptions) ([]Components, error) { // - All the providers must support the same API Version of Cluster API (contract) // - All provider CRDs that are referenced in core Cluster API CRDs must comply with the CRD naming scheme, // otherwise a warning is logged. - if err := installer.Validate(); err != nil { + if err := installer.Validate(ctx); err != nil { if !options.IgnoreValidationErrors { return nil, err } @@ -132,7 +142,7 @@ func (c *clusterctlClient) Init(options InitOptions) ([]Components, error) { // Before installing the providers, ensure the cert-manager Webhook is in place. certManager := clusterClient.CertManager() - if err := certManager.EnsureInstalled(); err != nil { + if err := certManager.EnsureInstalled(ctx); err != nil { return nil, err } @@ -140,7 +150,7 @@ func (c *clusterctlClient) Init(options InitOptions) ([]Components, error) { WaitProviders: options.WaitProviders, WaitProviderTimeout: options.WaitProviderTimeout, } - components, err := installer.Install(installOpts) + components, err := installer.Install(ctx, installOpts) if err != nil { return nil, err } @@ -165,22 +175,22 @@ func (c *clusterctlClient) Init(options InitOptions) ([]Components, error) { } // InitImages returns the list of images required for init. 
-func (c *clusterctlClient) InitImages(options InitOptions) ([]string, error) { +func (c *clusterctlClient) InitImages(ctx context.Context, options InitOptions) ([]string, error) { // gets access to the management cluster clusterClient, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: options.Kubeconfig}) if err != nil { return nil, err } - // Ensure this command only runs against empty management clusters or v1alpha4 management clusters. - if err := clusterClient.ProviderInventory().CheckCAPIContract(cluster.AllowCAPINotInstalled{}); err != nil { + // Ensure this command only runs against empty management clusters or v1beta1 management clusters. + if err := clusterClient.ProviderInventory().CheckCAPIContract(ctx, cluster.AllowCAPINotInstalled{}); err != nil { return nil, err } // checks if the cluster already contains a Core provider. // if not we consider this the first time init is executed, and thus we enforce the installation of a core provider, // a bootstrap provider and a control-plane provider (if not already explicitly requested by the user) - c.addDefaultProviders(clusterClient, &options) + c.addDefaultProviders(ctx, clusterClient, &options) // skip variable parsing when listing images options.skipTemplateProcess = true @@ -189,14 +199,14 @@ func (c *clusterctlClient) InitImages(options InitOptions) ([]string, error) { // create an installer service, add the requested providers to the install queue and then perform validation // of the target state of the management cluster before starting the installation. - installer, err := c.setupInstaller(clusterClient, options) + installer, err := c.setupInstaller(ctx, clusterClient, options) if err != nil { return nil, err } // Gets the list of container images required for the cert-manager (if not already installed). 
certManager := clusterClient.CertManager()
- images, err := certManager.Images()
+ images, err := certManager.Images(ctx)
if err != nil {
return nil, err
}
@@ -208,7 +218,7 @@ func (c *clusterctlClient) InitImages(options InitOptions) ([]string, error) {
return images, nil
}
-func (c *clusterctlClient) setupInstaller(cluster cluster.Client, options InitOptions) (cluster.ProviderInstaller, error) {
+func (c *clusterctlClient) setupInstaller(ctx context.Context, cluster cluster.Client, options InitOptions) (cluster.ProviderInstaller, error) {
installer := cluster.ProviderInstaller()
providerList := &clusterctlv1.ProviderList{}
@@ -221,7 +231,7 @@ func (c *clusterctlClient) setupInstaller(cluster cluster.Client, options InitOp
}
if !options.allowMissingProviderCRD {
- providerList, err := cluster.ProviderInventory().List()
+ providerList, err := cluster.ProviderInventory().List(ctx)
if err != nil {
return nil, err
}
@@ -230,40 +240,44 @@ func (c *clusterctlClient) setupInstaller(cluster cluster.Client, options InitOp
}
if options.CoreProvider != "" {
- if err := c.addToInstaller(addOptions, clusterctlv1.CoreProviderType, options.CoreProvider); err != nil {
+ if err := c.addToInstaller(ctx, addOptions, clusterctlv1.CoreProviderType, options.CoreProvider); err != nil {
return nil, err
}
}
- if err := c.addToInstaller(addOptions, clusterctlv1.BootstrapProviderType, options.BootstrapProviders...); err != nil {
+ if err := c.addToInstaller(ctx, addOptions, clusterctlv1.BootstrapProviderType, options.BootstrapProviders...); err != nil {
+ return nil, err
+ }
+
+ if err := c.addToInstaller(ctx, addOptions, clusterctlv1.ControlPlaneProviderType, options.ControlPlaneProviders...); err != nil {
return nil, err
}
- if err := c.addToInstaller(addOptions, clusterctlv1.ControlPlaneProviderType, options.ControlPlaneProviders...); err != nil {
+ if err := c.addToInstaller(ctx, addOptions, clusterctlv1.InfrastructureProviderType, options.InfrastructureProviders...); err != nil {
return nil, err
}
- if err := c.addToInstaller(addOptions, clusterctlv1.InfrastructureProviderType, options.InfrastructureProviders...); err != nil {
+ if err := c.addToInstaller(ctx, addOptions, clusterctlv1.IPAMProviderType, options.IPAMProviders...); err != nil {
return nil, err
}
- if err := c.addToInstaller(addOptions, clusterctlv1.IPAMProviderType, options.IPAMProviders...); err != nil {
+ if err := c.addToInstaller(ctx, addOptions, clusterctlv1.RuntimeExtensionProviderType, options.RuntimeExtensionProviders...); err != nil {
return nil, err
}
- if err := c.addToInstaller(addOptions, clusterctlv1.RuntimeExtensionProviderType, options.RuntimeExtensionProviders...); err != nil {
+ if err := c.addToInstaller(ctx, addOptions, clusterctlv1.AddonProviderType, options.AddonProviders...); err != nil {
return nil, err
}
return installer, nil
}
-func (c *clusterctlClient) addDefaultProviders(cluster cluster.Client, options *InitOptions) bool {
+func (c *clusterctlClient) addDefaultProviders(ctx context.Context, cluster cluster.Client, options *InitOptions) bool {
firstRun := false
// Check if there is already a core provider installed in the cluster
// Nb. we are ignoring the error so this operation can support listing images even if there is no existing management cluster;
// in case there is no existing management cluster, we assume there are no core providers installed in the cluster.
- currentCoreProvider, _ := cluster.ProviderInventory().GetDefaultProviderName(clusterctlv1.CoreProviderType) + currentCoreProvider, _ := cluster.ProviderInventory().GetDefaultProviderName(ctx, clusterctlv1.CoreProviderType) // If there are no core providers installed in the cluster, consider this a first run and add default providers to the list // of providers to be installed. @@ -290,7 +304,7 @@ type addToInstallerOptions struct { } // addToInstaller adds the components to the install queue and checks that the actual provider type match the target group. -func (c *clusterctlClient) addToInstaller(options addToInstallerOptions, providerType clusterctlv1.ProviderType, providers ...string) error { +func (c *clusterctlClient) addToInstaller(ctx context.Context, options addToInstallerOptions, providerType clusterctlv1.ProviderType, providers ...string) error { for _, provider := range providers { // It is possible to opt-out from automatic installation of bootstrap/control-plane providers using '-' as a provider name (NoopProvider). if provider == NoopProvider { @@ -303,7 +317,7 @@ func (c *clusterctlClient) addToInstaller(options addToInstallerOptions, provide TargetNamespace: options.targetNamespace, SkipTemplateProcess: options.skipTemplateProcess, } - components, err := c.getComponentsByName(provider, providerType, componentsOptions) + components, err := c.getComponentsByName(ctx, provider, providerType, componentsOptions) if err != nil { return errors.Wrapf(err, "failed to get provider components for the %q provider", provider) } diff --git a/cmd/clusterctl/client/init_test.go b/cmd/clusterctl/client/init_test.go index 8b3e72c55e27..3a98ee88b94a 100644 --- a/cmd/clusterctl/client/init_test.go +++ b/cmd/clusterctl/client/init_test.go @@ -170,7 +170,7 @@ func Test_clusterctlClient_InitImages(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - got, err := tt.field.client.InitImages(InitOptions{ + got, err := tt.field.client.InitImages(ctx, InitOptions{ Kubeconfig: Kubeconfig{Path: "kubeconfig", Context: tt.args.kubeconfigContext}, CoreProvider: tt.args.coreProvider, BootstrapProviders: tt.args.bootstrapProvider, @@ -186,7 +186,7 @@ func Test_clusterctlClient_InitImages(t *testing.T) { g.Expect(err.Error()).To(ContainSubstring(tt.expectedErrorMessage)) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(HaveLen(len(tt.expectedImages))) g.Expect(got).To(ConsistOf(tt.expectedImages)) }) @@ -196,7 +196,7 @@ func Test_clusterctlClient_InitImages(t *testing.T) { func Test_clusterctlClient_Init(t *testing.T) { // create a config variables client which does not have the value for // SOME_VARIABLE as expected in the infra components YAML - fconfig := newFakeConfig(). + fconfig := newFakeConfig(ctx). WithVar("ANOTHER_VARIABLE", "value"). WithProvider(capiProviderConfig). 
WithProvider(infraProviderConfig) @@ -538,10 +538,10 @@ func Test_clusterctlClient_Init(t *testing.T) { if tt.field.hasCRD { input := cluster.Kubeconfig{Path: "kubeconfig", Context: "mgmt-context"} - g.Expect(tt.field.client.clusters[input].ProviderInventory().EnsureCustomResourceDefinitions()).To(Succeed()) + g.Expect(tt.field.client.clusters[input].ProviderInventory().EnsureCustomResourceDefinitions(ctx)).To(Succeed()) } - got, err := tt.field.client.Init(InitOptions{ + got, err := tt.field.client.Init(ctx, InitOptions{ Kubeconfig: Kubeconfig{Path: "kubeconfig", Context: "mgmt-context"}, CoreProvider: tt.args.coreProvider, BootstrapProviders: tt.args.bootstrapProvider, @@ -553,7 +553,7 @@ func Test_clusterctlClient_Init(t *testing.T) { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(HaveLen(len(tt.want))) for i, gItem := range got { @@ -578,7 +578,7 @@ var ( func setupCluster(providers []Provider, certManagerClient cluster.CertManagerClient) (*fakeConfigClient, *fakeClient) { // create a config variables client which does not have the value for // SOME_VARIABLE as expected in the infra components YAML - cfg := newFakeConfig(). + cfg := newFakeConfig(ctx). WithVar("ANOTHER_VARIABLE", "value"). WithProvider(capiProviderConfig). WithProvider(infraProviderConfig) @@ -613,7 +613,7 @@ func fakeEmptyCluster() *fakeClient { } func fakeConfig(providers []config.Provider, variables map[string]string) *fakeConfigClient { - config := newFakeConfig() + config := newFakeConfig(ctx) for _, p := range providers { config = config.WithProvider(p) } @@ -635,7 +635,7 @@ func fakeCluster(config *fakeConfigClient, repos []*fakeRepositoryClient, certMa // fakeRepositories returns a base set of repositories for the different types // of providers. func fakeRepositories(config *fakeConfigClient, providers []Provider) []*fakeRepositoryClient { - repository1 := newFakeRepository(capiProviderConfig, config). + repository1 := newFakeRepository(ctx, capiProviderConfig, config). WithPaths("root", "components.yaml"). WithDefaultVersion("v1.0.0"). WithFile("v0.9.0", "components.yaml", componentsYAML("ns1")). @@ -658,7 +658,7 @@ func fakeRepositories(config *fakeConfigClient, providers []Provider) []*fakeRep {Major: 1, Minor: 1, Contract: test.CurrentCAPIContract}, }, }) - repository2 := newFakeRepository(bootstrapProviderConfig, config). + repository2 := newFakeRepository(ctx, bootstrapProviderConfig, config). WithPaths("root", "components.yaml"). WithDefaultVersion("v2.0.0"). WithFile("v0.9.0", "components.yaml", componentsYAML("ns1")). @@ -681,7 +681,7 @@ func fakeRepositories(config *fakeConfigClient, providers []Provider) []*fakeRep {Major: 2, Minor: 1, Contract: test.CurrentCAPIContract}, }, }) - repository3 := newFakeRepository(controlPlaneProviderConfig, config). + repository3 := newFakeRepository(ctx, controlPlaneProviderConfig, config). WithPaths("root", "components.yaml"). WithDefaultVersion("v2.0.0"). WithFile("v0.9.0", "components.yaml", componentsYAML("ns1")). @@ -704,7 +704,7 @@ func fakeRepositories(config *fakeConfigClient, providers []Provider) []*fakeRep {Major: 2, Minor: 1, Contract: test.CurrentCAPIContract}, }, }) - repository4 := newFakeRepository(infraProviderConfig, config). + repository4 := newFakeRepository(ctx, infraProviderConfig, config). WithPaths("root", "components.yaml"). WithDefaultVersion("v3.0.0"). WithFile("v0.9.0", "components.yaml", componentsYAML("ns1")). 
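The context plumbing above also changes how clusterctl is consumed as a library: every public entry point now takes a context.Context, and Init defaults WaitProviderTimeout to five minutes when the caller leaves it zero, since a library caller cannot rely on CLI flag defaulting. A minimal sketch of a library consumer after this change, assuming client.New gained a context parameter in the same way as the other constructors in this diff:

package main

import (
	"context"
	"log"

	"sigs.k8s.io/cluster-api/cmd/clusterctl/client"
)

func main() {
	ctx := context.Background()

	// An empty path falls back to the default clusterctl configuration lookup.
	c, err := client.New(ctx, "")
	if err != nil {
		log.Fatal(err)
	}

	// WaitProviderTimeout is deliberately left zero here; Init defaults it
	// to 5 minutes as shown in the init.go hunk above.
	components, err := c.Init(ctx, client.InitOptions{
		Kubeconfig:              client.Kubeconfig{Path: "mgmt.kubeconfig"},
		InfrastructureProviders: []string{"docker"},
		WaitProviders:           true,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("initialized management cluster with %d provider components", len(components))
}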
@@ -733,7 +733,7 @@ func fakeRepositories(config *fakeConfigClient, providers []Provider) []*fakeRep
for _, provider := range providers {
providerRepositories = append(providerRepositories,
- newFakeRepository(provider, config).
+ newFakeRepository(ctx, provider, config).
WithPaths("root", "components.yaml").
WithDefaultVersion("v2.0.0").
WithFile("v2.0.0", "components.yaml", componentsYAML("ns2")).
@@ -748,7 +748,7 @@
}
func fakeClusterCtlClient(config *fakeConfigClient, repos []*fakeRepositoryClient, clusters []*fakeClusterClient) *fakeClient {
- client := newFakeClient(config)
+ client := newFakeClient(ctx, config)
for _, r := range repos {
client = client.WithRepository(r)
}
diff --git a/cmd/clusterctl/client/move.go b/cmd/clusterctl/client/move.go
index 32d90c65a578..6d5299cc9131 100644
--- a/cmd/clusterctl/client/move.go
+++ b/cmd/clusterctl/client/move.go
@@ -17,6 +17,7 @@ limitations under the License.
package client
import (
+ "context"
"os"
"github.com/pkg/errors"
@@ -38,6 +39,10 @@ type MoveOptions struct {
// namespace will be used.
Namespace string
+ // ExperimentalResourceMutators accepts any number of resource mutator functions that are applied on all resources being moved.
+ // This is an experimental feature and is exposed only from the library and not (yet) through the CLI.
+ ExperimentalResourceMutators []cluster.ResourceMutatorFunc
+
// FromDirectory applies configuration from a directory.
FromDirectory string
@@ -48,7 +53,7 @@
DryRun bool
}
-func (c *clusterctlClient) Move(options MoveOptions) error {
+func (c *clusterctlClient) Move(ctx context.Context, options MoveOptions) error {
// Both backup and restore make no sense. It's a complete move.
if options.FromDirectory != "" && options.ToDirectory != "" {
return errors.Errorf("can't set both FromDirectory and ToDirectory")
@@ -62,17 +67,17 @@
}
if options.ToDirectory != "" {
- return c.toDirectory(options)
+ return c.toDirectory(ctx, options)
} else if options.FromDirectory != "" {
- return c.fromDirectory(options)
- } else {
- return c.move(options)
+ return c.fromDirectory(ctx, options)
}
+
+ return c.move(ctx, options)
}
-func (c *clusterctlClient) move(options MoveOptions) error {
+func (c *clusterctlClient) move(ctx context.Context, options MoveOptions) error {
// Get the client for interacting with the source management cluster.
- fromCluster, err := c.getClusterClient(options.FromKubeconfig)
+ fromCluster, err := c.getClusterClient(ctx, options.FromKubeconfig)
if err != nil {
return err
}
@@ -89,16 +94,16 @@
var toCluster cluster.Client
if !options.DryRun {
// Get the client for interacting with the target management cluster.
- if toCluster, err = c.getClusterClient(options.ToKubeconfig); err != nil {
+ if toCluster, err = c.getClusterClient(ctx, options.ToKubeconfig); err != nil {
return err
}
}
- return fromCluster.ObjectMover().Move(options.Namespace, toCluster, options.DryRun)
+ return fromCluster.ObjectMover().Move(ctx, options.Namespace, toCluster, options.DryRun, options.ExperimentalResourceMutators...)
} -func (c *clusterctlClient) fromDirectory(options MoveOptions) error { - toCluster, err := c.getClusterClient(options.ToKubeconfig) +func (c *clusterctlClient) fromDirectory(ctx context.Context, options MoveOptions) error { + toCluster, err := c.getClusterClient(ctx, options.ToKubeconfig) if err != nil { return err } @@ -107,11 +112,11 @@ func (c *clusterctlClient) fromDirectory(options MoveOptions) error { return err } - return toCluster.ObjectMover().FromDirectory(toCluster, options.FromDirectory) + return toCluster.ObjectMover().FromDirectory(ctx, toCluster, options.FromDirectory) } -func (c *clusterctlClient) toDirectory(options MoveOptions) error { - fromCluster, err := c.getClusterClient(options.FromKubeconfig) +func (c *clusterctlClient) toDirectory(ctx context.Context, options MoveOptions) error { + fromCluster, err := c.getClusterClient(ctx, options.FromKubeconfig) if err != nil { return err } @@ -129,22 +134,22 @@ func (c *clusterctlClient) toDirectory(options MoveOptions) error { return err } - return fromCluster.ObjectMover().ToDirectory(options.Namespace, options.ToDirectory) + return fromCluster.ObjectMover().ToDirectory(ctx, options.Namespace, options.ToDirectory) } -func (c *clusterctlClient) getClusterClient(kubeconfig Kubeconfig) (cluster.Client, error) { +func (c *clusterctlClient) getClusterClient(ctx context.Context, kubeconfig Kubeconfig) (cluster.Client, error) { cluster, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: kubeconfig}) if err != nil { return nil, err } // Ensure this command only runs against management clusters with the current Cluster API contract. - if err := cluster.ProviderInventory().CheckCAPIContract(); err != nil { + if err := cluster.ProviderInventory().CheckCAPIContract(ctx); err != nil { return nil, err } // Ensures the custom resource definitions required by clusterctl are in place. - if err := cluster.ProviderInventory().EnsureCustomResourceDefinitions(); err != nil { + if err := cluster.ProviderInventory().EnsureCustomResourceDefinitions(ctx); err != nil { return nil, err } return cluster, nil diff --git a/cmd/clusterctl/client/move_test.go b/cmd/clusterctl/client/move_test.go index a34ba55cc31b..83ad30d67a82 100644 --- a/cmd/clusterctl/client/move_test.go +++ b/cmd/clusterctl/client/move_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package client import ( + "context" "os" "testing" @@ -129,12 +130,14 @@ func Test_clusterctlClient_Move(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - err := tt.fields.client.Move(tt.args.options) + ctx := context.Background() + + err := tt.fields.client.Move(ctx, tt.args.options) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) }) } } @@ -192,12 +195,14 @@ func Test_clusterctlClient_ToDirectory(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - err := tt.fields.client.Move(tt.args.options) + ctx := context.Background() + + err := tt.fields.client.Move(ctx, tt.args.options) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) }) } } @@ -255,21 +260,25 @@ func Test_clusterctlClient_FromDirectory(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - err := tt.fields.client.Move(tt.args.options) + ctx := context.Background() + + err := tt.fields.client.Move(ctx, tt.args.options) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) }) } } func fakeClientForMove() *fakeClient { + ctx := context.Background() + core := config.NewProvider("cluster-api", "https://somewhere.com", clusterctlv1.CoreProviderType) infra := config.NewProvider("infra", "https://somewhere.com", clusterctlv1.InfrastructureProviderType) - config1 := newFakeConfig(). + config1 := newFakeConfig(ctx). WithProvider(core). WithProvider(infra) @@ -285,7 +294,7 @@ func fakeClientForMove() *fakeClient { WithProviderInventory(infra.Name(), infra.Type(), "v2.0.0", "infra-system"). WithObjs(test.FakeCAPISetupObjects()...) - client := newFakeClient(config1). + client := newFakeClient(ctx, config1). WithCluster(cluster1). WithCluster(cluster2) @@ -298,22 +307,22 @@ type fakeObjectMover struct { fromDirectoryErr error } -func (f *fakeObjectMover) Move(_ string, _ cluster.Client, _ bool) error { +func (f *fakeObjectMover) Move(_ context.Context, _ string, _ cluster.Client, _ bool, _ ...cluster.ResourceMutatorFunc) error { return f.moveErr } -func (f *fakeObjectMover) ToDirectory(_ string, _ string) error { +func (f *fakeObjectMover) ToDirectory(_ context.Context, _ string, _ string) error { return f.toDirectoryErr } -func (f *fakeObjectMover) Backup(_ string, _ string) error { +func (f *fakeObjectMover) Backup(_ context.Context, _ string, _ string) error { return f.toDirectoryErr } -func (f *fakeObjectMover) FromDirectory(_ cluster.Client, _ string) error { +func (f *fakeObjectMover) FromDirectory(_ context.Context, _ cluster.Client, _ string) error { return f.fromDirectoryErr } -func (f *fakeObjectMover) Restore(_ cluster.Client, _ string) error { +func (f *fakeObjectMover) Restore(_ context.Context, _ cluster.Client, _ string) error { return f.fromDirectoryErr } diff --git a/cmd/clusterctl/client/repository/client.go b/cmd/clusterctl/client/repository/client.go index 915936880ec0..b191b9b0fdb9 100644 --- a/cmd/clusterctl/client/repository/client.go +++ b/cmd/clusterctl/client/repository/client.go @@ -17,6 +17,7 @@ limitations under the License. 
package repository import ( + "context" "net/url" "strings" @@ -39,7 +40,7 @@ type Client interface { DefaultVersion() string // GetVersions return the list of versions that are available in a provider repository - GetVersions() ([]string, error) + GetVersions(ctx context.Context) ([]string, error) // Components provide access to YAML file for creating provider components. Components() ComponentsClient @@ -71,8 +72,8 @@ func (c *repositoryClient) DefaultVersion() string { return c.repository.DefaultVersion() } -func (c *repositoryClient) GetVersions() ([]string, error) { - return c.repository.GetVersions() +func (c *repositoryClient) GetVersions(ctx context.Context) ([]string, error) { + return c.repository.GetVersions(ctx) } func (c *repositoryClient) Components() ComponentsClient { @@ -115,11 +116,11 @@ func InjectYamlProcessor(p yaml.Processor) Option { } // New returns a Client. -func New(provider config.Provider, configClient config.Client, options ...Option) (Client, error) { - return newRepositoryClient(provider, configClient, options...) +func New(ctx context.Context, provider config.Provider, configClient config.Client, options ...Option) (Client, error) { + return newRepositoryClient(ctx, provider, configClient, options...) } -func newRepositoryClient(provider config.Provider, configClient config.Client, options ...Option) (*repositoryClient, error) { +func newRepositoryClient(ctx context.Context, provider config.Provider, configClient config.Client, options ...Option) (*repositoryClient, error) { client := &repositoryClient{ Provider: provider, configClient: configClient, @@ -131,7 +132,7 @@ func newRepositoryClient(provider config.Provider, configClient config.Client, o // if there is an injected repository, use it, otherwise use a default one if client.repository == nil { - r, err := repositoryFactory(provider, configClient.Variables()) + r, err := repositoryFactory(ctx, provider, configClient.Variables()) if err != nil { return nil, errors.Wrapf(err, "failed to get repository client for the %s with name %s", provider.Type(), provider.Name()) } @@ -160,14 +161,14 @@ type Repository interface { ComponentsPath() string // GetFile return a file for a given provider version. - GetFile(version string, path string) ([]byte, error) + GetFile(ctx context.Context, version string, path string) ([]byte, error) // GetVersions return the list of versions that are available in a provider repository - GetVersions() ([]string, error) + GetVersions(ctx context.Context) ([]string, error) } // repositoryFactory returns the repository implementation corresponding to the provider URL. 
-func repositoryFactory(providerConfig config.Provider, configVariablesClient config.VariablesClient) (Repository, error) { +func repositoryFactory(ctx context.Context, providerConfig config.Provider, configVariablesClient config.VariablesClient) (Repository, error) { // parse the repository url rURL, err := url.Parse(providerConfig.URL()) if err != nil { @@ -177,7 +178,7 @@ func repositoryFactory(providerConfig config.Provider, configVariablesClient con if rURL.Scheme == httpsScheme { // if the url is a GitHub repository if rURL.Host == githubDomain { - repo, err := NewGitHubRepository(providerConfig, configVariablesClient) + repo, err := NewGitHubRepository(ctx, providerConfig, configVariablesClient) if err != nil { return nil, errors.Wrap(err, "error creating the GitHub repository client") } @@ -198,7 +199,7 @@ func repositoryFactory(providerConfig config.Provider, configVariablesClient con // if the url is a local filesystem repository if rURL.Scheme == "file" || rURL.Scheme == "" { - repo, err := newLocalRepository(providerConfig, configVariablesClient) + repo, err := newLocalRepository(ctx, providerConfig, configVariablesClient) if err != nil { return nil, errors.Wrap(err, "error creating the local filesystem repository client") } diff --git a/cmd/clusterctl/client/repository/client_test.go b/cmd/clusterctl/client/repository/client_test.go index 93e3a409352c..5de7d91d85b9 100644 --- a/cmd/clusterctl/client/repository/client_test.go +++ b/cmd/clusterctl/client/repository/client_test.go @@ -17,6 +17,7 @@ limitations under the License. package repository import ( + "context" "os" "testing" @@ -31,14 +32,16 @@ import ( func Test_newRepositoryClient_LocalFileSystemRepository(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + tmpDir := createTempDir(t) defer os.RemoveAll(tmpDir) dst1 := createLocalTestProviderFile(t, tmpDir, "bootstrap-foo/v1.0.0/bootstrap-components.yaml", "") dst2 := createLocalTestProviderFile(t, tmpDir, "bootstrap-bar/v2.0.0/bootstrap-components.yaml", "") - configClient, err := config.New("", config.InjectReader(test.NewFakeReader())) - g.Expect(err).NotTo(HaveOccurred()) + configClient, err := config.New(ctx, "", config.InjectReader(test.NewFakeReader())) + g.Expect(err).ToNot(HaveOccurred()) type fields struct { provider config.Provider @@ -81,8 +84,10 @@ func Test_newRepositoryClient_LocalFileSystemRepository(t *testing.T) { t.Run(tt.name, func(t *testing.T) { gs := NewWithT(t) - repoClient, err := newRepositoryClient(tt.fields.provider, configClient) - gs.Expect(err).NotTo(HaveOccurred()) + ctx := context.Background() + + repoClient, err := newRepositoryClient(ctx, tt.fields.provider, configClient) + gs.Expect(err).ToNot(HaveOccurred()) gs.Expect(repoClient.repository).To(BeAssignableToTypeOf(tt.expected)) }) @@ -126,18 +131,22 @@ func Test_newRepositoryClient_YamlProcessor(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + + ctx := context.Background() + configProvider := config.NewProvider("fakeProvider", "", clusterctlv1.CoreProviderType) - configClient, err := config.New("", config.InjectReader(test.NewFakeReader())) - g.Expect(err).NotTo(HaveOccurred()) + configClient, err := config.New(ctx, "", config.InjectReader(test.NewFakeReader())) + g.Expect(err).ToNot(HaveOccurred()) tt.opts = append(tt.opts, InjectRepository(NewMemoryRepository())) repoClient, err := newRepositoryClient( + ctx, configProvider, configClient, tt.opts..., ) - g.Expect(err).NotTo(HaveOccurred()) + 
g.Expect(err).ToNot(HaveOccurred()) tt.assert(g, repoClient.processor) }) } diff --git a/cmd/clusterctl/client/repository/clusterclass_client.go b/cmd/clusterctl/client/repository/clusterclass_client.go index afd0d8d60ebb..7b199d81a5e9 100644 --- a/cmd/clusterctl/client/repository/clusterclass_client.go +++ b/cmd/clusterctl/client/repository/clusterclass_client.go @@ -17,6 +17,8 @@ limitations under the License. package repository import ( + "context" + "github.com/pkg/errors" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/config" @@ -27,7 +29,7 @@ import ( // ClusterClassClient has methods to work with cluster class templates hosted on a provider repository. // Templates are yaml files to be used for creating a guest cluster. type ClusterClassClient interface { - Get(name, targetNamespace string, skipTemplateProcess bool) (Template, error) + Get(ctx context.Context, name, targetNamespace string, skipTemplateProcess bool) (Template, error) } type clusterClassClient struct { @@ -57,7 +59,7 @@ func newClusterClassClient(input ClusterClassClientInput) *clusterClassClient { } } -func (cc *clusterClassClient) Get(name, targetNamespace string, skipTemplateProcess bool) (Template, error) { +func (cc *clusterClassClient) Get(ctx context.Context, name, targetNamespace string, skipTemplateProcess bool) (Template, error) { log := logf.Log if targetNamespace == "" { @@ -79,13 +81,13 @@ func (cc *clusterClassClient) Get(name, targetNamespace string, skipTemplateProc } if rawArtifact == nil { - log.V(5).Info("Fetching", "File", filename, "Provider", cc.provider.Name(), "Type", cc.provider.Type(), "Version", version) - rawArtifact, err = cc.repository.GetFile(version, filename) + log.V(5).Info("Fetching", "file", filename, "provider", cc.provider.Name(), "type", cc.provider.Type(), "version", version) + rawArtifact, err = cc.repository.GetFile(ctx, version, filename) if err != nil { return nil, errors.Wrapf(err, "failed to read %q from provider's repository %q", filename, cc.provider.ManifestLabel()) } } else { - log.V(1).Info("Using", "Override", filename, "Provider", cc.provider.ManifestLabel(), "Version", version) + log.V(1).Info("Using", "override", filename, "provider", cc.provider.ManifestLabel(), "version", version) } return NewTemplate(TemplateInput{ diff --git a/cmd/clusterctl/client/repository/clusterclass_client_test.go b/cmd/clusterctl/client/repository/clusterclass_client_test.go index ce71a6c5ac88..4ba742ce5626 100644 --- a/cmd/clusterctl/client/repository/clusterclass_client_test.go +++ b/cmd/clusterctl/client/repository/clusterclass_client_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package repository import ( + "context" "fmt" "testing" @@ -161,6 +162,8 @@ func Test_ClusterClassClient_Get(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + f := newClusterClassClient( ClusterClassClientInput{ version: tt.fields.version, @@ -170,19 +173,19 @@ func Test_ClusterClassClient_Get(t *testing.T) { processor: tt.fields.processor, }, ) - got, err := f.Get(tt.args.name, tt.args.targetNamespace, tt.args.listVariablesOnly) + got, err := f.Get(ctx, tt.args.name, tt.args.targetNamespace, tt.args.listVariablesOnly) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got.Variables()).To(Equal(tt.want.variables)) g.Expect(got.TargetNamespace()).To(Equal(tt.want.targetNamespace)) // check variable replaced in yaml yaml, err := got.Yaml() - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) if !tt.args.listVariablesOnly { g.Expect(yaml).To(ContainSubstring(fmt.Sprintf("variable: %s", variableValue))) diff --git a/cmd/clusterctl/client/repository/components.go b/cmd/clusterctl/client/repository/components.go index 8c534e403a97..ba979513caeb 100644 --- a/cmd/clusterctl/client/repository/components.go +++ b/cmd/clusterctl/client/repository/components.go @@ -18,6 +18,7 @@ package repository import ( "fmt" + "sort" "strings" "github.com/pkg/errors" @@ -259,6 +260,22 @@ func NewComponents(input ComponentsInput) (Components, error) { // Add common labels. objs = addCommonLabels(objs, input.Provider) + // Deploying cert-manager objects and especially Certificates before Mutating- + // ValidatingWebhookConfigurations and CRDs ensures cert-manager's ca-injector + // receives the event for the objects at the right time to inject the new CA. + sort.SliceStable(objs, func(i, j int) bool { + // First prioritize Namespaces over everything. + if objs[i].GetKind() == "Namespace" { + return true + } + if objs[j].GetKind() == "Namespace" { + return false + } + + // Second prioritize cert-manager objects. + return objs[i].GroupVersionKind().Group == "cert-manager.io" + }) + return &components{ Provider: input.Provider, version: input.Options.Version, diff --git a/cmd/clusterctl/client/repository/components_client.go b/cmd/clusterctl/client/repository/components_client.go index 107e4cd5547d..1613f3e7124e 100644 --- a/cmd/clusterctl/client/repository/components_client.go +++ b/cmd/clusterctl/client/repository/components_client.go @@ -17,6 +17,8 @@ limitations under the License. package repository import ( + "context" + "github.com/pkg/errors" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/config" @@ -27,8 +29,8 @@ import ( // ComponentsClient has methods to work with yaml file for generating provider components. // Assets are yaml files to be used for deploying a provider into a management cluster. type ComponentsClient interface { - Raw(options ComponentsOptions) ([]byte, error) - Get(options ComponentsOptions) (Components, error) + Raw(ctx context.Context, options ComponentsOptions) ([]byte, error) + Get(ctx context.Context, options ComponentsOptions) (Components, error) } // componentsClient implements ComponentsClient. @@ -53,20 +55,20 @@ func newComponentsClient(provider config.Provider, repository Repository, config } // Raw returns the components from a repository. 
-func (f *componentsClient) Raw(options ComponentsOptions) ([]byte, error) { - return f.getRawBytes(&options) +func (f *componentsClient) Raw(ctx context.Context, options ComponentsOptions) ([]byte, error) { + return f.getRawBytes(ctx, &options) } // Get returns the components from a repository. -func (f *componentsClient) Get(options ComponentsOptions) (Components, error) { - file, err := f.getRawBytes(&options) +func (f *componentsClient) Get(ctx context.Context, options ComponentsOptions) (Components, error) { + file, err := f.getRawBytes(ctx, &options) if err != nil { return nil, err } return NewComponents(ComponentsInput{f.provider, f.configClient, f.processor, file, options}) } -func (f *componentsClient) getRawBytes(options *ComponentsOptions) ([]byte, error) { +func (f *componentsClient) getRawBytes(ctx context.Context, options *ComponentsOptions) ([]byte, error) { log := logf.Log // If the request does not target a specific version, read from the default repository version that is derived from the repository URL, e.g. latest. @@ -89,13 +91,13 @@ func (f *componentsClient) getRawBytes(options *ComponentsOptions) ([]byte, erro } if file == nil { - log.V(5).Info("Fetching", "File", path, "Provider", f.provider.Name(), "Type", f.provider.Type(), "Version", options.Version) - file, err = f.repository.GetFile(options.Version, path) + log.V(5).Info("Fetching", "file", path, "provider", f.provider.Name(), "type", f.provider.Type(), "version", options.Version) + file, err = f.repository.GetFile(ctx, options.Version, path) if err != nil { return nil, errors.Wrapf(err, "failed to read %q from provider's repository %q", path, f.provider.ManifestLabel()) } } else { - log.Info("Using", "Override", path, "Provider", f.provider.ManifestLabel(), "Version", options.Version) + log.Info("Using", "override", path, "provider", f.provider.ManifestLabel(), "version", options.Version) } return file, nil } diff --git a/cmd/clusterctl/client/repository/components_client_test.go b/cmd/clusterctl/client/repository/components_client_test.go index 54fde746f5ec..48c476ea358f 100644 --- a/cmd/clusterctl/client/repository/components_client_test.go +++ b/cmd/clusterctl/client/repository/components_client_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package repository import ( + "context" "fmt" "testing" @@ -66,8 +67,8 @@ func Test_componentsClient_Get(t *testing.T) { p1 := config.NewProvider("p1", "", clusterctlv1.BootstrapProviderType) - configClient, err := config.New("", config.InjectReader(test.NewFakeReader().WithVar(variableName, variableValue))) - g.Expect(err).NotTo(HaveOccurred()) + configClient, err := config.New(context.Background(), "", config.InjectReader(test.NewFakeReader().WithVar(variableName, variableValue))) + g.Expect(err).ToNot(HaveOccurred()) type fields struct { provider config.Provider @@ -259,6 +260,8 @@ func Test_componentsClient_Get(t *testing.T) { t.Run(tt.name, func(t *testing.T) { gs := NewWithT(t) + ctx := context.Background() + options := ComponentsOptions{ Version: tt.args.version, TargetNamespace: tt.args.targetNamespace, @@ -268,12 +271,12 @@ func Test_componentsClient_Get(t *testing.T) { if tt.fields.processor != nil { f.processor = tt.fields.processor } - got, err := f.Get(options) + got, err := f.Get(ctx, options) if tt.wantErr { gs.Expect(err).To(HaveOccurred()) return } - gs.Expect(err).NotTo(HaveOccurred()) + gs.Expect(err).ToNot(HaveOccurred()) gs.Expect(got.Name()).To(Equal(tt.want.provider.Name())) gs.Expect(got.Type()).To(Equal(tt.want.provider.Type())) diff --git a/cmd/clusterctl/client/repository/components_test.go b/cmd/clusterctl/client/repository/components_test.go index 1d78fbf5bb44..46beda4a39e5 100644 --- a/cmd/clusterctl/client/repository/components_test.go +++ b/cmd/clusterctl/client/repository/components_test.go @@ -95,7 +95,7 @@ func Test_inspectTargetNamespace(t *testing.T) { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(Equal(tt.want)) }) @@ -632,7 +632,7 @@ func Test_addNamespaceIfMissing(t *testing.T) { got := addNamespaceIfMissing(tt.args.objs, tt.args.targetNamespace) wgot, err := inspectTargetNamespace(got) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(wgot).To(Equal(tt.args.targetNamespace)) }) } @@ -682,7 +682,7 @@ func Test_addCommonLabels(t *testing.T) { g := NewWithT(t) got := addCommonLabels(tt.args.objs, config.NewProvider(tt.args.name, "", tt.args.providerType)) - g.Expect(got).To(Equal(tt.want)) + g.Expect(got).To(BeComparableTo(tt.want)) }) } } @@ -721,5 +721,5 @@ func TestAlterComponents(t *testing.T) { if err := AlterComponents(c, alterFn); err != nil { t.Errorf("AlterComponents() error = %v", err) } - g.Expect(c.objs).To(Equal(want)) + g.Expect(c.objs).To(BeComparableTo(want)) } diff --git a/cmd/clusterctl/client/repository/metadata_client.go b/cmd/clusterctl/client/repository/metadata_client.go index 6a75e0580e4b..84a7897b575a 100644 --- a/cmd/clusterctl/client/repository/metadata_client.go +++ b/cmd/clusterctl/client/repository/metadata_client.go @@ -17,6 +17,8 @@ limitations under the License. package repository import ( + "context" + "github.com/pkg/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -33,7 +35,7 @@ const metadataFile = "metadata.yaml" // Metadata are yaml files providing additional information about provider's assets like e.g the version compatibility Matrix. type MetadataClient interface { // Get returns the provider's metadata. - Get() (*clusterctlv1.Metadata, error) + Get(ctx context.Context) (*clusterctlv1.Metadata, error) } // metadataClient implements MetadataClient. 
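The ordering change added to NewComponents in components.go above is easy to miss: all parsed objects are stable-sorted so that Namespaces land first and cert-manager.io objects precede everything else, which gives cert-manager's ca-injector time to inject the new CA into webhook configurations and CRDs. A self-contained sketch of the comparator's effect, using an invented obj type in place of unstructured.Unstructured:

package main

import (
	"fmt"
	"sort"
)

// obj stands in for unstructured.Unstructured, keeping only the fields
// the comparator in NewComponents inspects.
type obj struct{ kind, group string }

func main() {
	objs := []obj{
		{"CustomResourceDefinition", "apiextensions.k8s.io"},
		{"Certificate", "cert-manager.io"},
		{"Namespace", ""},
		{"Deployment", "apps"},
	}

	// Same prioritization as the sort.SliceStable call in components.go:
	// Namespaces first, then cert-manager.io objects, then the rest.
	sort.SliceStable(objs, func(i, j int) bool {
		if objs[i].kind == "Namespace" {
			return true
		}
		if objs[j].kind == "Namespace" {
			return false
		}
		return objs[i].group == "cert-manager.io"
	})

	// Prints the Namespace first, then the Certificate, then the others.
	fmt.Println(objs)
}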
@@ -57,7 +59,7 @@ func newMetadataClient(provider config.Provider, version string, repository Repo } } -func (f *metadataClient) Get() (*clusterctlv1.Metadata, error) { +func (f *metadataClient) Get(ctx context.Context) (*clusterctlv1.Metadata, error) { log := logf.Log // gets the metadata file from the repository @@ -73,13 +75,13 @@ func (f *metadataClient) Get() (*clusterctlv1.Metadata, error) { return nil, err } if file == nil { - log.V(5).Info("Fetching", "File", metadataFile, "Provider", f.provider.Name(), "Type", f.provider.Type(), "Version", version) - file, err = f.repository.GetFile(version, metadataFile) + log.V(5).Info("Fetching", "file", metadataFile, "provider", f.provider.Name(), "type", f.provider.Type(), "version", version) + file, err = f.repository.GetFile(ctx, version, metadataFile) if err != nil { return nil, errors.Wrapf(err, "failed to read %q from the repository for provider %q", metadataFile, f.provider.ManifestLabel()) } } else { - log.V(1).Info("Using", "Override", metadataFile, "Provider", f.provider.ManifestLabel(), "Version", version) + log.V(1).Info("Using", "override", metadataFile, "provider", f.provider.ManifestLabel(), "version", version) } // Convert the yaml into a typed object diff --git a/cmd/clusterctl/client/repository/metadata_client_test.go b/cmd/clusterctl/client/repository/metadata_client_test.go index 5c16ffc38a97..84ceb260aa34 100644 --- a/cmd/clusterctl/client/repository/metadata_client_test.go +++ b/cmd/clusterctl/client/repository/metadata_client_test.go @@ -17,6 +17,7 @@ limitations under the License. package repository import ( + "context" "testing" . "github.com/onsi/gomega" @@ -121,14 +122,14 @@ func Test_metadataClient_Get(t *testing.T) { version: tt.fields.version, repository: tt.fields.repository, } - got, err := f.Get() + got, err := f.Get(context.Background()) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) - g.Expect(got).To(Equal(tt.want)) + g.Expect(got).To(BeComparableTo(tt.want)) }) } } diff --git a/cmd/clusterctl/client/repository/overrides.go b/cmd/clusterctl/client/repository/overrides.go index 32c4a742ff5e..a791c055f190 100644 --- a/cmd/clusterctl/client/repository/overrides.go +++ b/cmd/clusterctl/client/repository/overrides.go @@ -23,11 +23,12 @@ import ( "runtime" "strings" + "github.com/adrg/xdg" "github.com/drone/envsubst/v2" "github.com/pkg/errors" - "k8s.io/client-go/util/homedir" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/config" + logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log" ) const ( @@ -68,7 +69,11 @@ func newOverride(o *newOverrideInput) Overrider { // Path returns the fully formed path to the file within the specified // overrides config. func (o *overrides) Path() (string, error) { - basepath := filepath.Join(homedir.HomeDir(), config.ConfigFolder, overrideFolder) + configDirectory, err := xdg.ConfigFile(config.ConfigFolderXDG) + if err != nil { + return "", err + } + basepath := filepath.Join(configDirectory, overrideFolder) f, err := o.configVariablesClient.Get(overrideFolderKey) if err == nil && strings.TrimSpace(f) != "" { basepath = f @@ -103,7 +108,11 @@ func (o *overrides) Path() (string, error) { // getLocalOverride return local override file from the config folder, if it exists. // This is required for development purposes, but it can be used also in production as a workaround for problems on the official repositories. 
func getLocalOverride(info *newOverrideInput) ([]byte, error) { + log := logf.Log + overridePath, err := newOverride(info).Path() + log.V(5).Info("Potential override file", "searchFile", overridePath, "provider", info.provider.ManifestLabel(), "version", info.version) + if err != nil { return nil, err } diff --git a/cmd/clusterctl/client/repository/overrides_test.go b/cmd/clusterctl/client/repository/overrides_test.go index b68e2c72bf6d..b3fc1ebb149c 100644 --- a/cmd/clusterctl/client/repository/overrides_test.go +++ b/cmd/clusterctl/client/repository/overrides_test.go @@ -21,8 +21,8 @@ import ( "path/filepath" "testing" + "github.com/adrg/xdg" . "github.com/onsi/gomega" - "k8s.io/client-go/util/homedir" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/config" @@ -30,6 +30,9 @@ import ( ) func TestOverrides(t *testing.T) { + configDirectory, err := xdg.ConfigFile(config.ConfigFolderXDG) + NewWithT(t).Expect(err).ToNot(HaveOccurred()) + tests := []struct { name string configVarClient config.VariablesClient @@ -39,17 +42,17 @@ func TestOverrides(t *testing.T) { { name: "returns default overrides path if no config provided", configVarClient: test.NewFakeVariableClient(), - expectedPath: filepath.Join(homedir.HomeDir(), config.ConfigFolder, overrideFolder, "infrastructure-myinfra", "v1.0.1", "infra-comp.yaml"), + expectedPath: filepath.Join(configDirectory, overrideFolder, "infrastructure-myinfra", "v1.0.1", "infra-comp.yaml"), }, { name: "returns default overrides path if config variable is empty", configVarClient: test.NewFakeVariableClient().WithVar(overrideFolderKey, ""), - expectedPath: filepath.Join(homedir.HomeDir(), config.ConfigFolder, overrideFolder, "infrastructure-myinfra", "v1.0.1", "infra-comp.yaml"), + expectedPath: filepath.Join(configDirectory, overrideFolder, "infrastructure-myinfra", "v1.0.1", "infra-comp.yaml"), }, { name: "returns default overrides path if config variable is whitespace", configVarClient: test.NewFakeVariableClient().WithVar(overrideFolderKey, " "), - expectedPath: filepath.Join(homedir.HomeDir(), config.ConfigFolder, overrideFolder, "infrastructure-myinfra", "v1.0.1", "infra-comp.yaml"), + expectedPath: filepath.Join(configDirectory, overrideFolder, "infrastructure-myinfra", "v1.0.1", "infra-comp.yaml"), }, { name: "uses overrides folder from the config variables", @@ -86,7 +89,9 @@ func TestOverrides(t *testing.T) { filePath: "infra-comp.yaml", }) - g.Expect(override.Path()).To(Equal(tt.expectedPath)) + overridePath, err := override.Path() + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(overridePath).To(Equal(tt.expectedPath)) }) } } @@ -94,6 +99,7 @@ func TestOverrides(t *testing.T) { func TestGetLocalOverrides(t *testing.T) { t.Run("returns contents of file successfully", func(t *testing.T) { g := NewWithT(t) + tmpDir := createTempDir(t) defer os.RemoveAll(tmpDir) diff --git a/cmd/clusterctl/client/repository/repository_github.go b/cmd/clusterctl/client/repository/repository_github.go index 38f4b12cdcb8..786a06128549 100644 --- a/cmd/clusterctl/client/repository/repository_github.go +++ b/cmd/clusterctl/client/repository/repository_github.go @@ -28,8 +28,8 @@ import ( "strings" "time" - "github.com/blang/semver" - "github.com/google/go-github/v48/github" + "github.com/blang/semver/v4" + "github.com/google/go-github/v53/github" "github.com/pkg/errors" "golang.org/x/oauth2" "k8s.io/apimachinery/pkg/util/version" @@ -42,13 +42,16 @@ import ( ) const ( - httpsScheme = "https" - githubDomain = 
"github.com" - githubReleaseRepository = "releases" - githubLatestReleaseLabel = "latest" + httpsScheme = "https" + githubDomain = "github.com" + githubReleaseRepository = "releases" + githubLatestReleaseLabel = "latest" + githubListReleasesPerPageLimit = 100 ) var ( + errNotFound = errors.New("404 Not Found") + // Caches used to limit the number of GitHub API calls. cacheVersions = map[string][]string{} @@ -98,7 +101,7 @@ func (g *gitHubRepository) DefaultVersion() string { } // GetVersions returns the list of versions that are available in a provider repository. -func (g *gitHubRepository) GetVersions() ([]string, error) { +func (g *gitHubRepository) GetVersions(ctx context.Context) ([]string, error) { log := logf.Log cacheID := fmt.Sprintf("%s/%s", g.owner, g.repository) @@ -106,7 +109,7 @@ func (g *gitHubRepository) GetVersions() ([]string, error) { return versions, nil } - goProxyClient, err := g.getGoproxyClient() + goProxyClient, err := g.getGoproxyClient(ctx) if err != nil { return nil, errors.Wrap(err, "get versions client") } @@ -117,7 +120,7 @@ func (g *gitHubRepository) GetVersions() ([]string, error) { gomodulePath := path.Join(githubDomain, g.owner, g.repository) var parsedVersions semver.Versions - parsedVersions, err = goProxyClient.GetVersions(context.TODO(), gomodulePath) + parsedVersions, err = goProxyClient.GetVersions(ctx, gomodulePath) // Log the error before fallback to github repository client happens. if err != nil { @@ -131,7 +134,7 @@ func (g *gitHubRepository) GetVersions() ([]string, error) { // Fallback to github repository client if goProxyClient is nil or an error occurred. if goProxyClient == nil || err != nil { - versions, err = g.getVersions() + versions, err = g.getVersions(ctx) if err != nil { return nil, errors.Wrapf(err, "failed to get repository versions") } @@ -152,23 +155,50 @@ func (g *gitHubRepository) ComponentsPath() string { } // GetFile returns a file for a given provider version. -func (g *gitHubRepository) GetFile(version, path string) ([]byte, error) { - release, err := g.getReleaseByTag(version) +func (g *gitHubRepository) GetFile(ctx context.Context, version, path string) ([]byte, error) { + log := logf.Log + + cacheID := fmt.Sprintf("%s/%s:%s:%s", g.owner, g.repository, version, path) + if content, ok := cacheFiles[cacheID]; ok { + return content, nil + } + + // Try to get the file using http get. + // NOTE: this can be disabled by setting GORPOXY to `direct` or `off` (same knobs used for skipping goproxy requests). + if goProxyClient, _ := g.getGoproxyClient(ctx); goProxyClient != nil { + files, err := g.httpGetFilesFromRelease(ctx, version, path) + if err != nil { + log.V(5).Info("error using httpGet to get file from GitHub releases, falling back to github client", "owner", g.owner, "repository", g.repository, "version", version, "path", path, "error", err) + } else { + cacheFiles[cacheID] = files + return files, nil + } + } + + // If the http get request failed (or it is disabled) falls back on using the GITHUB api to download the file + + release, err := g.getReleaseByTag(ctx, version) if err != nil { + if errors.Is(err, errNotFound) { + // If it was ErrNotFound, then there is no release yet for the resolved tag. 
+ // Ref: https://github.com/kubernetes-sigs/cluster-api/issues/7889 + return nil, errors.Wrapf(err, "release not found for version %s, please retry later or set \"GOPROXY=off\" to get the current stable release", version) + } return nil, errors.Wrapf(err, "failed to get GitHub release %s", version) } - // download files from the release - files, err := g.downloadFilesFromRelease(release, path) + // Download files from the release. + files, err := g.downloadFilesFromRelease(ctx, release, path) if err != nil { return nil, errors.Wrapf(err, "failed to download files from GitHub release %s", version) } + cacheFiles[cacheID] = files return files, nil } // NewGitHubRepository returns a gitHubRepository implementation. -func NewGitHubRepository(providerConfig config.Provider, configVariablesClient config.VariablesClient, opts ...githubRepositoryOption) (Repository, error) { +func NewGitHubRepository(ctx context.Context, providerConfig config.Provider, configVariablesClient config.VariablesClient, opts ...githubRepositoryOption) (Repository, error) { if configVariablesClient == nil { return nil, errors.New("invalid arguments: configVariablesClient can't be nil") } @@ -199,9 +229,9 @@ func NewGitHubRepository(providerConfig config.Provider, configVariablesClient c defaultVersion := urlSplit[3] path := strings.Join(urlSplit[4:], "/") - // use path's directory as a rootPath + // Use path's directory as a rootPath. rootPath := filepath.Dir(path) - // use the file name (if any) as componentsPath + // Use the file name (if any) as componentsPath. componentsPath := getComponentsPath(path, rootPath) repo := &gitHubRepository{ @@ -214,19 +244,19 @@ func NewGitHubRepository(providerConfig config.Provider, configVariablesClient c componentsPath: componentsPath, } - // process githubRepositoryOptions + // Process githubRepositoryOptions. for _, o := range opts { o(repo) } if token, err := configVariablesClient.Get(config.GitHubTokenVariable); err == nil { - repo.setClientToken(token) + repo.setClientToken(ctx, token) } if defaultVersion == githubLatestReleaseLabel { - repo.defaultVersion, err = latestContractRelease(repo, clusterv1.GroupVersion.Version) + repo.defaultVersion, err = latestContractRelease(ctx, repo, clusterv1.GroupVersion.Version) if err != nil { - return nil, errors.Wrap(err, "failed to get GitHub latest version") + return nil, errors.Wrap(err, "failed to get latest release") } } @@ -251,7 +281,7 @@ func (g *gitHubRepository) getClient() *github.Client { // getGoproxyClient returns a go proxy client. // It returns nil, nil if the environment variable is set to `direct` or `off` // to skip goproxy requests. -func (g *gitHubRepository) getGoproxyClient() (*goproxy.Client, error) { +func (g *gitHubRepository) getGoproxyClient(_ context.Context) (*goproxy.Client, error) { if g.injectGoproxyClient != nil { return g.injectGoproxyClient, nil } @@ -267,32 +297,50 @@ func (g *gitHubRepository) getGoproxyClient() (*goproxy.Client, error) { } // setClientToken sets authenticatingHTTPClient field of gitHubRepository struct. -func (g *gitHubRepository) setClientToken(token string) { +func (g *gitHubRepository) setClientToken(ctx context.Context, token string) { ts := oauth2.StaticTokenSource( &oauth2.Token{AccessToken: token}, ) - g.authenticatingHTTPClient = oauth2.NewClient(context.TODO(), ts) + g.authenticatingHTTPClient = oauth2.NewClient(ctx, ts) } // getVersions returns all the release versions for a github repository. 
-func (g *gitHubRepository) getVersions() ([]string, error) {
+func (g *gitHubRepository) getVersions(ctx context.Context) ([]string, error) {
client := g.getClient()
- // get all the releases
+ // Get all the releases.
// NB. currently Github API does not support result ordering, so it is not possible to limit results
- var releases []*github.RepositoryRelease
+ var allReleases []*github.RepositoryRelease
var retryError error
- _ = wait.PollImmediate(retryableOperationInterval, retryableOperationTimeout, func() (bool, error) {
+ _ = wait.PollUntilContextTimeout(ctx, retryableOperationInterval, retryableOperationTimeout, true, func(ctx context.Context) (bool, error) {
var listReleasesErr error
- releases, _, listReleasesErr = client.Repositories.ListReleases(context.TODO(), g.owner, g.repository, nil)
+ // Get the first page of GitHub releases.
+ releases, response, listReleasesErr := client.Repositories.ListReleases(ctx, g.owner, g.repository, &github.ListOptions{PerPage: githubListReleasesPerPageLimit})
if listReleasesErr != nil {
retryError = g.handleGithubErr(listReleasesErr, "failed to get the list of releases")
- // return immediately if we are rate limited
+ // Return immediately if we are rate limited.
if _, ok := listReleasesErr.(*github.RateLimitError); ok {
return false, retryError
}
return false, nil
}
+ allReleases = append(allReleases, releases...)
+
+ // Paginated GitHub APIs provide pointers to the first, next, previous and last
+ // pages in the response, which can be used to iterate through the pages.
+ // https://github.com/google/go-github/blob/14bb610698fc2f9013cad5db79b2d5fe4d53e13c/github/github.go#L541-L551
+ for response.NextPage != 0 {
+ releases, response, listReleasesErr = client.Repositories.ListReleases(ctx, g.owner, g.repository, &github.ListOptions{Page: response.NextPage, PerPage: githubListReleasesPerPageLimit})
+ if listReleasesErr != nil {
+ retryError = g.handleGithubErr(listReleasesErr, "failed to get the list of releases")
+ // Return immediately if we are rate limited.
+ if _, ok := listReleasesErr.(*github.RateLimitError); ok {
+ return false, retryError
+ }
+ return false, nil
+ }
+ allReleases = append(allReleases, releases...)
+ }
retryError = nil
return true, nil
})
@@ -300,7 +348,7 @@
return nil, retryError
}
versions := []string{}
- for _, r := range releases {
+ for _, r := range allReleases {
r := r // pin
if r.TagName == nil {
continue
@@ -317,7 +365,7 @@
}
// getReleaseByTag returns the github repository release with a specific tag name.
-func (g *gitHubRepository) getReleaseByTag(tag string) (*github.RepositoryRelease, error) {
+func (g *gitHubRepository) getReleaseByTag(ctx context.Context, tag string) (*github.RepositoryRelease, error) {
cacheID := fmt.Sprintf("%s/%s:%s", g.owner, g.repository, tag)
if release, ok := cacheReleases[cacheID]; ok {
return release, nil
@@ -327,12 +375,16 @@
var release *github.RepositoryRelease
var retryError error
- _ = wait.PollImmediate(retryableOperationInterval, retryableOperationTimeout, func() (bool, error) {
+ _ = wait.PollUntilContextTimeout(ctx, retryableOperationInterval, retryableOperationTimeout, true, func(ctx context.Context) (bool, error) {
var getReleasesErr error
- release, _, getReleasesErr = client.Repositories.GetReleaseByTag(context.TODO(), g.owner, g.repository, tag)
+ release, _, getReleasesErr = client.Repositories.GetReleaseByTag(ctx, g.owner, g.repository, tag)
if getReleasesErr != nil {
retryError = g.handleGithubErr(getReleasesErr, "failed to read release %q", tag)
- // return immediately if we are rate limited
+ // Return immediately if not found.
+ if errors.Is(retryError, errNotFound) {
+ return false, retryError
+ }
+ // Return immediately if we are rate limited.
if _, ok := getReleasesErr.(*github.RateLimitError); ok {
return false, retryError
}
@@ -349,19 +401,51 @@
return release, nil
}
-// downloadFilesFromRelease download a file from release.
-func (g *gitHubRepository) downloadFilesFromRelease(release *github.RepositoryRelease, fileName string) ([]byte, error) {
- ctx := context.TODO()
+// httpGetFilesFromRelease gets a file from GitHub releases using an HTTP GET request.
+func (g *gitHubRepository) httpGetFilesFromRelease(ctx context.Context, version, fileName string) ([]byte, error) {
+ downloadURL := fmt.Sprintf("https://github.com/%s/%s/releases/download/%s/%s", g.owner, g.repository, version, fileName)
+ var retryError error
+ var content []byte
+ _ = wait.PollUntilContextTimeout(ctx, retryableOperationInterval, retryableOperationTimeout, true, func(context.Context) (bool, error) {
+ resp, err := http.Get(downloadURL) //nolint:gosec,noctx
+ if err != nil {
+ retryError = errors.Wrap(err, "error sending request")
+ return false, nil
+ }
+ defer resp.Body.Close()
- cacheID := fmt.Sprintf("%s/%s:%s:%s", g.owner, g.repository, *release.TagName, fileName)
- if content, ok := cacheFiles[cacheID]; ok {
- return content, nil
+ // If we get a 404 there is no reason to retry.
+ if resp.StatusCode == http.StatusNotFound {
+ retryError = errNotFound
+ return true, nil
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ retryError = errors.Errorf("error getting file, status code: %d", resp.StatusCode)
+ return false, nil
+ }
+
+ content, err = io.ReadAll(resp.Body)
+ if err != nil {
+ retryError = errors.Wrap(err, "error reading response body")
+ return false, nil
+ }
+
+ retryError = nil
+ return true, nil
+ })
+ if retryError != nil {
+ return nil, retryError
}
+ return content, nil
+}
+// downloadFilesFromRelease downloads a file from a release.
+func (g *gitHubRepository) downloadFilesFromRelease(ctx context.Context, release *github.RepositoryRelease, fileName string) ([]byte, error) {
client := g.getClient()
absoluteFileName := filepath.Join(g.rootPath, fileName)
- // search for the file into the release assets, retrieving the asset id
+ // Search for the file in the release assets, retrieving the asset id.
var assetID *int64 for _, a := range release.Assets { if a.Name != nil && *a.Name == absoluteFileName { @@ -375,41 +459,42 @@ func (g *gitHubRepository) downloadFilesFromRelease(release *github.RepositoryRe var reader io.ReadCloser var retryError error - _ = wait.PollImmediate(retryableOperationInterval, retryableOperationTimeout, func() (bool, error) { + var content []byte + _ = wait.PollUntilContextTimeout(ctx, retryableOperationInterval, retryableOperationTimeout, true, func(ctx context.Context) (bool, error) { var redirect string var downloadReleaseError error reader, redirect, downloadReleaseError = client.Repositories.DownloadReleaseAsset(ctx, g.owner, g.repository, *assetID, http.DefaultClient) if downloadReleaseError != nil { retryError = g.handleGithubErr(downloadReleaseError, "failed to download file %q from %q release", *release.TagName, fileName) - // return immediately if we are rate limited + // Return immediately if we are rate limited. if _, ok := downloadReleaseError.(*github.RateLimitError); ok { return false, retryError } return false, nil } + defer reader.Close() + if redirect != "" { - // NOTE: DownloadReleaseAsset should not return a redirect address when used with the DefaultClient + // NOTE: DownloadReleaseAsset should not return a redirect address when used with the DefaultClient. retryError = errors.New("unexpected redirect while downloading the release asset") return true, retryError } + // Read contents from the reader (redirect or not), and return. + var err error + content, err = io.ReadAll(reader) + if err != nil { + retryError = errors.Wrapf(err, "failed to read downloaded file %q from %q release", *release.TagName, fileName) + return false, nil + } + retryError = nil return true, nil }) - if reader != nil { - defer reader.Close() - } if retryError != nil { return nil, retryError } - // Read contents from the reader (redirect or not), and return. - content, err := io.ReadAll(reader) - if err != nil { - return nil, errors.Wrapf(err, "failed to read downloaded file %q from %q release", *release.TagName, fileName) - } - - cacheFiles[cacheID] = content return content, nil } @@ -418,5 +503,10 @@ func (g *gitHubRepository) handleGithubErr(err error, message string, args ...in if _, ok := err.(*github.RateLimitError); ok { return errors.New("rate limit for github api has been reached. Please wait one hour or get a personal API token and assign it to the GITHUB_TOKEN environment variable") } + if ghErr, ok := err.(*github.ErrorResponse); ok { + if ghErr.Response.StatusCode == http.StatusNotFound { + return errNotFound + } + } return errors.Wrapf(err, message, args...) } diff --git a/cmd/clusterctl/client/repository/repository_github_test.go b/cmd/clusterctl/client/repository/repository_github_test.go index a24631102704..7535ddcb9a19 100644 --- a/cmd/clusterctl/client/repository/repository_github_test.go +++ b/cmd/clusterctl/client/repository/repository_github_test.go @@ -17,21 +17,22 @@ limitations under the License. package repository import ( + "context" "fmt" "net/http" - "net/http/httptest" - "net/url" + "strings" "testing" "time" - "github.com/google/go-github/v48/github" + "github.com/google/go-github/v53/github" . 
"github.com/onsi/gomega" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/config" "sigs.k8s.io/cluster-api/cmd/clusterctl/internal/test" "sigs.k8s.io/cluster-api/internal/goproxy" + goproxytest "sigs.k8s.io/cluster-api/internal/goproxy/test" ) func Test_gitHubRepository_GetVersions(t *testing.T) { @@ -41,41 +42,42 @@ func Test_gitHubRepository_GetVersions(t *testing.T) { client, mux, teardown := test.NewFakeGitHub() defer teardown() - // setup an handler for returning 5 fake releases + // Setup an handler for returning 5 fake releases. mux.HandleFunc("/repos/o/r1/releases", func(w http.ResponseWriter, r *http.Request) { - testMethod(t, r, "GET") + goproxytest.HTTPTestMethod(t, r, "GET") fmt.Fprint(w, `[`) fmt.Fprint(w, `{"id":1, "tag_name": "v0.4.0"},`) fmt.Fprint(w, `{"id":2, "tag_name": "v0.4.1"},`) fmt.Fprint(w, `{"id":3, "tag_name": "v0.4.2"},`) - fmt.Fprint(w, `{"id":4, "tag_name": "v0.4.3-alpha"}`) // prerelease + fmt.Fprint(w, `{"id":4, "tag_name": "v0.4.3-alpha"}`) // Pre-release fmt.Fprint(w, `]`) }) - clientGoproxy, muxGoproxy, teardownGoproxy := newFakeGoproxy() + scheme, host, muxGoproxy, teardownGoproxy := goproxytest.NewFakeGoproxy() + clientGoproxy := goproxy.NewClient(scheme, host) defer teardownGoproxy() - // setup an handler for returning 4 fake releases + // Setup a handler for returning 4 fake releases. muxGoproxy.HandleFunc("/github.com/o/r2/@v/list", func(w http.ResponseWriter, r *http.Request) { - testMethod(t, r, "GET") + goproxytest.HTTPTestMethod(t, r, "GET") fmt.Fprint(w, "v0.5.0\n") fmt.Fprint(w, "v0.4.0\n") fmt.Fprint(w, "v0.3.2\n") fmt.Fprint(w, "v0.3.1\n") }) - // setup an handler for returning 3 different major fake releases + // Setup a handler for returning 3 different major fake releases. 
muxGoproxy.HandleFunc("/github.com/o/r3/@v/list", func(w http.ResponseWriter, r *http.Request) { - testMethod(t, r, "GET") + goproxytest.HTTPTestMethod(t, r, "GET") fmt.Fprint(w, "v1.0.0\n") fmt.Fprint(w, "v0.1.0\n") }) muxGoproxy.HandleFunc("/github.com/o/r3/v2/@v/list", func(w http.ResponseWriter, r *http.Request) { - testMethod(t, r, "GET") + goproxytest.HTTPTestMethod(t, r, "GET") fmt.Fprint(w, "v2.0.0\n") }) muxGoproxy.HandleFunc("/github.com/o/r3/v3/@v/list", func(w http.ResponseWriter, r *http.Request) { - testMethod(t, r, "GET") + goproxytest.HTTPTestMethod(t, r, "GET") fmt.Fprint(w, "v3.0.0\n") }) @@ -114,17 +116,20 @@ func Test_gitHubRepository_GetVersions(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + + ctx := context.Background() + resetCaches() - gRepo, err := NewGitHubRepository(tt.providerConfig, configVariablesClient, injectGithubClient(client), injectGoproxyClient(clientGoproxy)) - g.Expect(err).NotTo(HaveOccurred()) + gRepo, err := NewGitHubRepository(ctx, tt.providerConfig, configVariablesClient, injectGithubClient(client), injectGoproxyClient(clientGoproxy)) + g.Expect(err).ToNot(HaveOccurred()) - got, err := gRepo.GetVersions() + got, err := gRepo.GetVersions(ctx) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(Equal(tt.want)) }) } @@ -214,13 +219,13 @@ func Test_githubRepository_newGitHubRepository(t *testing.T) { g := NewWithT(t) resetCaches() - gitHub, err := NewGitHubRepository(tt.field.providerConfig, tt.field.variableClient) + gitHub, err := NewGitHubRepository(context.Background(), tt.field.providerConfig, tt.field.variableClient) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(gitHub).To(Equal(tt.want)) }) } @@ -265,15 +270,15 @@ func Test_githubRepository_getFile(t *testing.T) { providerConfig := config.NewProvider("test", "https://github.com/o/r/releases/v0.4.1/file.yaml", clusterctlv1.CoreProviderType) - // test.NewFakeGitHub and handler for returning a fake release + // Setup a handler for returning a fake release. mux.HandleFunc("/repos/o/r/releases/tags/v0.4.1", func(w http.ResponseWriter, r *http.Request) { - testMethod(t, r, "GET") + goproxytest.HTTPTestMethod(t, r, "GET") fmt.Fprint(w, `{"id":13, "tag_name": "v0.4.1", "assets": [{"id": 1, "name": "file.yaml"}] }`) }) - // test.NewFakeGitHub an handler for returning a fake release asset + // Setup a handler for returning a fake release asset. 
mux.HandleFunc("/repos/o/r/releases/assets/1", func(w http.ResponseWriter, r *http.Request) { - testMethod(t, r, "GET") + goproxytest.HTTPTestMethod(t, r, "GET") w.Header().Set("Content-Type", "application/octet-stream") w.Header().Set("Content-Disposition", "attachment; filename=file.yaml") fmt.Fprint(w, "content") @@ -316,16 +321,16 @@ func Test_githubRepository_getFile(t *testing.T) { g := NewWithT(t) resetCaches() - gitHub, err := NewGitHubRepository(providerConfig, configVariablesClient, injectGithubClient(client)) - g.Expect(err).NotTo(HaveOccurred()) + gitHub, err := NewGitHubRepository(context.Background(), providerConfig, configVariablesClient, injectGithubClient(client)) + g.Expect(err).ToNot(HaveOccurred()) - got, err := gitHub.GetFile(tt.release, tt.fileName) + got, err := gitHub.GetFile(context.Background(), tt.release, tt.fileName) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(Equal(tt.want)) }) } @@ -337,16 +342,36 @@ func Test_gitHubRepository_getVersions(t *testing.T) { client, mux, teardown := test.NewFakeGitHub() defer teardown() - // setup an handler for returning 5 fake releases + // Setup a handler for returning fake releases in a paginated manner + // Each response contains a link to the next page (if available) which + // is parsed by the handler to navigate through all pages mux.HandleFunc("/repos/o/r1/releases", func(w http.ResponseWriter, r *http.Request) { - testMethod(t, r, "GET") - fmt.Fprint(w, `[`) - fmt.Fprint(w, `{"id":1, "tag_name": "v0.4.0"},`) - fmt.Fprint(w, `{"id":2, "tag_name": "v0.4.1"},`) - fmt.Fprint(w, `{"id":3, "tag_name": "v0.4.2"},`) - fmt.Fprint(w, `{"id":4, "tag_name": "v0.4.3-alpha"},`) // prerelease - fmt.Fprint(w, `{"id":5, "tag_name": "foo"}`) // no semantic version tag - fmt.Fprint(w, `]`) + goproxytest.HTTPTestMethod(t, r, "GET") + page := r.URL.Query().Get("page") + switch page { + case "", "1": + // Page 1 + w.Header().Set("Link", `; rel="next"`) // Link to page 2 + fmt.Fprint(w, `[`) + fmt.Fprint(w, `{"id":1, "tag_name": "v0.4.0"},`) + fmt.Fprint(w, `{"id":2, "tag_name": "v0.4.1"}`) + fmt.Fprint(w, `]`) + case "2": + // Page 2 + w.Header().Set("Link", `; rel="next"`) // Link to page 3 + fmt.Fprint(w, `[`) + fmt.Fprint(w, `{"id":3, "tag_name": "v0.4.2"},`) + fmt.Fprint(w, `{"id":4, "tag_name": "v0.4.3-alpha"}`) // Pre-release + fmt.Fprint(w, `]`) + case "3": + // Page 3 (last page) + fmt.Fprint(w, `[`) + fmt.Fprint(w, `{"id":4, "tag_name": "v0.4.4-beta"},`) // Pre-release + fmt.Fprint(w, `{"id":5, "tag_name": "foo"}`) // No semantic version tag + fmt.Fprint(w, `]`) + default: + t.Fatalf("unexpected page requested") + } }) configVariablesClient := test.NewFakeVariableClient() @@ -361,28 +386,31 @@ func Test_gitHubRepository_getVersions(t *testing.T) { wantErr bool }{ { - name: "Get versions", + name: "Get versions with all releases", field: field{ providerConfig: config.NewProvider("test", "https://github.com/o/r1/releases/v0.4.1/path", clusterctlv1.CoreProviderType), }, - want: []string{"v0.4.0", "v0.4.1", "v0.4.2", "v0.4.3-alpha"}, + want: []string{"v0.4.0", "v0.4.1", "v0.4.2", "v0.4.3-alpha", "v0.4.4-beta"}, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + + ctx := context.Background() + resetCaches() - gitHub, err := NewGitHubRepository(tt.field.providerConfig, configVariablesClient, injectGithubClient(client)) - g.Expect(err).NotTo(HaveOccurred()) + gitHub, err := 
NewGitHubRepository(ctx, tt.field.providerConfig, configVariablesClient, injectGithubClient(client)) + g.Expect(err).ToNot(HaveOccurred()) - got, err := gitHub.(*gitHubRepository).getVersions() + got, err := gitHub.(*gitHubRepository).getVersions(ctx) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(ConsistOf(tt.want)) }) @@ -395,26 +423,49 @@ func Test_gitHubRepository_getLatestContractRelease(t *testing.T) { client, mux, teardown := test.NewFakeGitHub() defer teardown() - // test.NewFakeGitHub and handler for returning a fake release + // Setup a handler for returning a fake release. mux.HandleFunc("/repos/o/r1/releases/tags/v0.5.0", func(w http.ResponseWriter, r *http.Request) { - testMethod(t, r, "GET") + goproxytest.HTTPTestMethod(t, r, "GET") fmt.Fprint(w, `{"id":13, "tag_name": "v0.5.0", "assets": [{"id": 1, "name": "metadata.yaml"}] }`) }) - // test.NewFakeGitHub an handler for returning a fake release metadata file + mux.HandleFunc("/repos/o/r1/releases/tags/v0.3.2", func(w http.ResponseWriter, r *http.Request) { + goproxytest.HTTPTestMethod(t, r, "GET") + fmt.Fprint(w, `{"id":14, "tag_name": "v0.3.2", "assets": [{"id": 2, "name": "metadata.yaml"}] }`) + }) + + // Setup a handler for returning a fake release metadata file. mux.HandleFunc("/repos/o/r1/releases/assets/1", func(w http.ResponseWriter, r *http.Request) { - testMethod(t, r, "GET") + goproxytest.HTTPTestMethod(t, r, "GET") + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Disposition", "attachment; filename=metadata.yaml") + fmt.Fprint(w, "apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3\nreleaseSeries:\n - major: 0\n minor: 4\n contract: v1alpha4\n - major: 0\n minor: 5\n contract: v1alpha4\n - major: 0\n minor: 3\n contract: v1alpha3\n") + }) + + mux.HandleFunc("/repos/o/r1/releases/assets/2", func(w http.ResponseWriter, r *http.Request) { + goproxytest.HTTPTestMethod(t, r, "GET") w.Header().Set("Content-Type", "application/octet-stream") w.Header().Set("Content-Disposition", "attachment; filename=metadata.yaml") fmt.Fprint(w, "apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3\nreleaseSeries:\n - major: 0\n minor: 4\n contract: v1alpha4\n - major: 0\n minor: 5\n contract: v1alpha4\n - major: 0\n minor: 3\n contract: v1alpha3\n") }) - clientGoproxy, muxGoproxy, teardownGoproxy := newFakeGoproxy() + scheme, host, muxGoproxy, teardownGoproxy := goproxytest.NewFakeGoproxy() + clientGoproxy := goproxy.NewClient(scheme, host) + defer teardownGoproxy() - // setup an handler for returning 4 fake releases + // Setup a handler for returning 4 fake releases. 
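The @v/list endpoints faked in these tests follow the Go module proxy protocol (https://go.dev/ref/mod#goproxy-protocol): GET $proxy/$module/@v/list returns the known versions of a module, one per line, which is exactly the shape the handlers below emit. A small client-side sketch against such an endpoint; the proxy URL and module path are placeholders.

package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

// listVersions fetches the version list for module from a Go module proxy.
func listVersions(proxyURL, module string) ([]string, error) {
	resp, err := http.Get(proxyURL + "/" + module + "/@v/list")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status: %s", resp.Status)
	}
	raw, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	// One version per line; Fields also drops the trailing newline.
	return strings.Fields(string(raw)), nil
}

func main() {
	versions, err := listVersions("https://proxy.golang.org", "sigs.k8s.io/cluster-api")
	if err != nil {
		panic(err)
	}
	fmt.Println(versions)
}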
muxGoproxy.HandleFunc("/github.com/o/r1/@v/list", func(w http.ResponseWriter, r *http.Request) { - testMethod(t, r, "GET") + goproxytest.HTTPTestMethod(t, r, "GET") + fmt.Fprint(w, "v0.5.0\n") + fmt.Fprint(w, "v0.4.0\n") + fmt.Fprint(w, "v0.3.2\n") + fmt.Fprint(w, "v0.3.1\n") + }) + + // setup an handler for returning 4 fake releases but no actual tagged release + muxGoproxy.HandleFunc("/github.com/o/r2/@v/list", func(w http.ResponseWriter, r *http.Request) { + goproxytest.HTTPTestMethod(t, r, "GET") fmt.Fprint(w, "v0.5.0\n") fmt.Fprint(w, "v0.4.0\n") fmt.Fprint(w, "v0.3.2\n") @@ -460,21 +511,30 @@ func Test_gitHubRepository_getLatestContractRelease(t *testing.T) { contract: "foo", wantErr: false, }, + { + name: "Return 404 if there is no release for the tag", + field: field{ + providerConfig: config.NewProvider("test", "https://github.com/o/r2/releases/v0.99.0/path", clusterctlv1.CoreProviderType), + }, + want: "0.99.0", + contract: "v1alpha4", + wantErr: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) resetCaches() - gRepo, err := NewGitHubRepository(tt.field.providerConfig, configVariablesClient, injectGithubClient(client), injectGoproxyClient(clientGoproxy)) - g.Expect(err).NotTo(HaveOccurred()) + gRepo, err := NewGitHubRepository(context.Background(), tt.field.providerConfig, configVariablesClient, injectGithubClient(client), injectGoproxyClient(clientGoproxy)) + g.Expect(err).ToNot(HaveOccurred()) - got, err := latestContractRelease(gRepo, tt.contract) + got, err := latestContractRelease(context.Background(), gRepo, tt.contract) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(Equal(tt.want)) }) } @@ -483,32 +543,60 @@ func Test_gitHubRepository_getLatestContractRelease(t *testing.T) { func Test_gitHubRepository_getLatestRelease(t *testing.T) { retryableOperationInterval = 200 * time.Millisecond retryableOperationTimeout = 1 * time.Second - clientGoproxy, muxGoproxy, teardownGoproxy := newFakeGoproxy() + scheme, host, muxGoproxy, teardownGoproxy := goproxytest.NewFakeGoproxy() + clientGoproxy := goproxy.NewClient(scheme, host) defer teardownGoproxy() - // setup an handler for returning 4 fake releases + client, mux, teardown := test.NewFakeGitHub() + defer teardown() + + // Setup a handler for returning 4 fake releases. muxGoproxy.HandleFunc("/github.com/o/r1/@v/list", func(w http.ResponseWriter, r *http.Request) { - testMethod(t, r, "GET") + goproxytest.HTTPTestMethod(t, r, "GET") fmt.Fprint(w, "v0.4.1\n") fmt.Fprint(w, "v0.4.2\n") fmt.Fprint(w, "v0.4.3-alpha\n") // prerelease fmt.Fprint(w, "foo\n") // no semantic version tag }) + // And also expose a release for them + mux.HandleFunc("/repos/o/r1/releases/tags/v0.4.2", func(w http.ResponseWriter, r *http.Request) { + goproxytest.HTTPTestMethod(t, r, "GET") + fmt.Fprint(w, `{"id":13, "tag_name": "v0.4.2", "assets": [{"id": 1, "name": "metadata.yaml"}] }`) + }) + mux.HandleFunc("/repos/o/r3/releases/tags/v0.1.0-alpha.2", func(w http.ResponseWriter, r *http.Request) { + goproxytest.HTTPTestMethod(t, r, "GET") + fmt.Fprint(w, `{"id":14, "tag_name": "v0.1.0-alpha.2", "assets": [{"id": 2, "name": "metadata.yaml"}] }`) + }) - // setup an handler for returning no releases - muxGoproxy.HandleFunc("/github.com/o/r2/@v/list", func(w http.ResponseWriter, r *http.Request) { - testMethod(t, r, "GET") + // Setup a handler for returning no releases. 
+ muxGoproxy.HandleFunc("/github.com/o/r2/@v/list", func(_ http.ResponseWriter, r *http.Request) { + goproxytest.HTTPTestMethod(t, r, "GET") // no releases }) - // setup an handler for returning fake prereleases only + // Setup a handler for returning fake prereleases only. muxGoproxy.HandleFunc("/github.com/o/r3/@v/list", func(w http.ResponseWriter, r *http.Request) { - testMethod(t, r, "GET") + goproxytest.HTTPTestMethod(t, r, "GET") fmt.Fprint(w, "v0.1.0-alpha.0\n") fmt.Fprint(w, "v0.1.0-alpha.1\n") fmt.Fprint(w, "v0.1.0-alpha.2\n") }) + // Setup a handler for returning a fake release metadata file. + mux.HandleFunc("/repos/o/r1/releases/assets/1", func(w http.ResponseWriter, r *http.Request) { + goproxytest.HTTPTestMethod(t, r, "GET") + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Disposition", "attachment; filename=metadata.yaml") + fmt.Fprint(w, "apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3\nreleaseSeries:\n - major: 0\n minor: 4\n contract: v1alpha4\n - major: 0\n minor: 5\n contract: v1alpha4\n - major: 0\n minor: 3\n contract: v1alpha3\n") + }) + + mux.HandleFunc("/repos/o/r3/releases/assets/2", func(w http.ResponseWriter, r *http.Request) { + goproxytest.HTTPTestMethod(t, r, "GET") + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Disposition", "attachment; filename=metadata.yaml") + fmt.Fprint(w, "apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3\nreleaseSeries:\n - major: 0\n minor: 4\n contract: v1alpha4\n - major: 0\n minor: 5\n contract: v1alpha4\n - major: 0\n minor: 3\n contract: v1alpha3\n") + }) + configVariablesClient := test.NewFakeVariableClient() type field struct { @@ -523,7 +611,7 @@ func Test_gitHubRepository_getLatestRelease(t *testing.T) { { name: "Get latest release, ignores pre-release version", field: field{ - providerConfig: config.NewProvider("test", "https://github.com/o/r1/releases/latest/path", clusterctlv1.CoreProviderType), + providerConfig: config.NewProvider("test", "https://github.com/o/r1/releases/v0.4.2/path", clusterctlv1.CoreProviderType), }, want: "v0.4.2", wantErr: false, @@ -539,7 +627,7 @@ func Test_gitHubRepository_getLatestRelease(t *testing.T) { { name: "Falls back to latest prerelease when no official release present", field: field{ - providerConfig: config.NewProvider("test", "https://github.com/o/r3/releases/latest/path", clusterctlv1.CoreProviderType), + providerConfig: config.NewProvider("test", "https://github.com/o/r3/releases/v0.1.0-alpha.2/path", clusterctlv1.CoreProviderType), }, want: "v0.1.0-alpha.2", wantErr: false, @@ -548,17 +636,20 @@ func Test_gitHubRepository_getLatestRelease(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + + ctx := context.Background() + resetCaches() - gRepo, err := NewGitHubRepository(tt.field.providerConfig, configVariablesClient, injectGoproxyClient(clientGoproxy)) - g.Expect(err).NotTo(HaveOccurred()) + gRepo, err := NewGitHubRepository(ctx, tt.field.providerConfig, configVariablesClient, injectGoproxyClient(clientGoproxy), injectGithubClient(client)) + g.Expect(err).ToNot(HaveOccurred()) - got, err := latestRelease(gRepo) + got, err := latestRelease(ctx, gRepo) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(Equal(tt.want)) g.Expect(gRepo.(*gitHubRepository).defaultVersion).To(Equal(tt.want)) }) @@ -568,17 +659,45 @@ func Test_gitHubRepository_getLatestRelease(t 
*testing.T) { func Test_gitHubRepository_getLatestPatchRelease(t *testing.T) { retryableOperationInterval = 200 * time.Millisecond retryableOperationTimeout = 1 * time.Second - clientGoproxy, muxGoproxy, teardownGoproxy := newFakeGoproxy() + scheme, host, muxGoproxy, teardownGoproxy := goproxytest.NewFakeGoproxy() + clientGoproxy := goproxy.NewClient(scheme, host) defer teardownGoproxy() - // setup an handler for returning 4 fake releases + client, mux, teardown := test.NewFakeGitHub() + defer teardown() + + // Setup a handler for returning 4 fake releases. muxGoproxy.HandleFunc("/github.com/o/r1/@v/list", func(w http.ResponseWriter, r *http.Request) { - testMethod(t, r, "GET") + goproxytest.HTTPTestMethod(t, r, "GET") fmt.Fprint(w, "v0.4.0\n") fmt.Fprint(w, "v0.3.2\n") fmt.Fprint(w, "v1.3.2\n") }) + // Setup a handler for returning a fake release. + mux.HandleFunc("/repos/o/r1/releases/tags/v0.4.0", func(w http.ResponseWriter, r *http.Request) { + goproxytest.HTTPTestMethod(t, r, "GET") + fmt.Fprint(w, `{"id":13, "tag_name": "v0.4.0", "assets": [{"id": 1, "name": "metadata.yaml"}] }`) + }) + + mux.HandleFunc("/repos/o/r1/releases/tags/v0.3.2", func(w http.ResponseWriter, r *http.Request) { + goproxytest.HTTPTestMethod(t, r, "GET") + fmt.Fprint(w, `{"id":14, "tag_name": "v0.3.2", "assets": [{"id": 1, "name": "metadata.yaml"}] }`) + }) + + mux.HandleFunc("/repos/o/r1/releases/tags/v1.3.2", func(w http.ResponseWriter, r *http.Request) { + goproxytest.HTTPTestMethod(t, r, "GET") + fmt.Fprint(w, `{"id":15, "tag_name": "v1.3.2", "assets": [{"id": 1, "name": "metadata.yaml"}] }`) + }) + + // Setup a handler for returning a fake release metadata file. + mux.HandleFunc("/repos/o/r1/releases/assets/1", func(w http.ResponseWriter, r *http.Request) { + goproxytest.HTTPTestMethod(t, r, "GET") + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Disposition", "attachment; filename=metadata.yaml") + fmt.Fprint(w, "apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3\nreleaseSeries:\n - major: 0\n minor: 4\n contract: v1alpha4\n - major: 0\n minor: 5\n contract: v1alpha4\n - major: 0\n minor: 3\n contract: v1alpha3\n") + }) + major0 := uint(0) minor3 := uint(3) minor4 := uint(4) @@ -599,7 +718,7 @@ func Test_gitHubRepository_getLatestPatchRelease(t *testing.T) { { name: "Get latest patch release, no Major/Minor specified", field: field{ - providerConfig: config.NewProvider("test", "https://github.com/o/r1/releases/latest/path", clusterctlv1.CoreProviderType), + providerConfig: config.NewProvider("test", "https://github.com/o/r1/releases/v1.3.2/path", clusterctlv1.CoreProviderType), }, minor: nil, major: nil, @@ -609,7 +728,7 @@ func Test_gitHubRepository_getLatestPatchRelease(t *testing.T) { { name: "Get latest patch release, for Major 0 and Minor 3", field: field{ - providerConfig: config.NewProvider("test", "https://github.com/o/r1/releases/latest/path", clusterctlv1.CoreProviderType), + providerConfig: config.NewProvider("test", "https://github.com/o/r1/releases/v0.3.2/path", clusterctlv1.CoreProviderType), }, major: &major0, minor: &minor3, @@ -619,7 +738,7 @@ func Test_gitHubRepository_getLatestPatchRelease(t *testing.T) { { name: "Get latest patch release, for Major 0 and Minor 4", field: field{ - providerConfig: config.NewProvider("test", "https://github.com/o/r1/releases/latest/path", clusterctlv1.CoreProviderType), + providerConfig: config.NewProvider("test", "https://github.com/o/r1/releases/v0.4.0/path", clusterctlv1.CoreProviderType), }, major: &major0, minor: 
&minor4, @@ -630,17 +749,20 @@ func Test_gitHubRepository_getLatestPatchRelease(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + + ctx := context.Background() + resetCaches() - gRepo, err := NewGitHubRepository(tt.field.providerConfig, configVariablesClient, injectGoproxyClient(clientGoproxy)) - g.Expect(err).NotTo(HaveOccurred()) + gRepo, err := NewGitHubRepository(ctx, tt.field.providerConfig, configVariablesClient, injectGoproxyClient(clientGoproxy), injectGithubClient(client)) + g.Expect(err).ToNot(HaveOccurred()) - got, err := latestPatchRelease(gRepo, tt.major, tt.minor) + got, err := latestPatchRelease(ctx, gRepo, tt.major, tt.minor) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(Equal(tt.want)) }) } @@ -654,9 +776,9 @@ func Test_gitHubRepository_getReleaseByTag(t *testing.T) { providerConfig := config.NewProvider("test", "https://github.com/o/r/releases/v0.4.1/path", clusterctlv1.CoreProviderType) - // setup and handler for returning a fake release + // Setup a handler for returning a fake release. mux.HandleFunc("/repos/o/r/releases/tags/foo", func(w http.ResponseWriter, r *http.Request) { - testMethod(t, r, "GET") + goproxytest.HTTPTestMethod(t, r, "GET") fmt.Fprint(w, `{"id":13, "tag_name": "v0.4.1"}`) }) @@ -676,7 +798,7 @@ func Test_gitHubRepository_getReleaseByTag(t *testing.T) { args: args{ tag: "foo", }, - wantTagName: pointer.String("v0.4.1"), + wantTagName: ptr.To("v0.4.1"), wantErr: false, }, { @@ -691,17 +813,20 @@ func Test_gitHubRepository_getReleaseByTag(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + + ctx := context.Background() + resetCaches() - gRepo, err := NewGitHubRepository(providerConfig, configVariablesClient, injectGithubClient(client)) - g.Expect(err).NotTo(HaveOccurred()) + gRepo, err := NewGitHubRepository(ctx, providerConfig, configVariablesClient, injectGithubClient(client)) + g.Expect(err).ToNot(HaveOccurred()) - got, err := gRepo.(*gitHubRepository).getReleaseByTag(tt.args.tag) + got, err := gRepo.(*gitHubRepository).getReleaseByTag(ctx, tt.args.tag) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) if tt.wantTagName == nil { g.Expect(got).To(BeNil()) @@ -722,16 +847,16 @@ func Test_gitHubRepository_downloadFilesFromRelease(t *testing.T) { providerConfig := config.NewProvider("test", "https://github.com/o/r/releases/v0.4.1/file.yaml", clusterctlv1.CoreProviderType) // tree/main/path not relevant for the test providerConfigWithRedirect := config.NewProvider("test", "https://github.com/o/r-with-redirect/releases/v0.4.1/file.yaml", clusterctlv1.CoreProviderType) // tree/main/path not relevant for the test - // test.NewFakeGitHub an handler for returning a fake release asset + // Setup a handler for returning a fake release asset. mux.HandleFunc("/repos/o/r/releases/assets/1", func(w http.ResponseWriter, r *http.Request) { - testMethod(t, r, "GET") + goproxytest.HTTPTestMethod(t, r, "GET") w.Header().Set("Content-Type", "application/octet-stream") w.Header().Set("Content-Disposition", "attachment; filename=file.yaml") fmt.Fprint(w, "content") }) - // handler which redirects to a different location + // Setup a handler which redirects to a different location. 
mux.HandleFunc("/repos/o/r-with-redirect/releases/assets/1", func(w http.ResponseWriter, r *http.Request) { - testMethod(t, r, "GET") + goproxytest.HTTPTestMethod(t, r, "GET") http.Redirect(w, r, "/api-v3/repos/o/r/releases/assets/1", http.StatusFound) }) @@ -829,29 +954,21 @@ func Test_gitHubRepository_downloadFilesFromRelease(t *testing.T) { g := NewWithT(t) resetCaches() - gRepo, err := NewGitHubRepository(tt.providerConfig, configVariablesClient, injectGithubClient(client)) - g.Expect(err).NotTo(HaveOccurred()) + gRepo, err := NewGitHubRepository(context.Background(), tt.providerConfig, configVariablesClient, injectGithubClient(client)) + g.Expect(err).ToNot(HaveOccurred()) - got, err := gRepo.(*gitHubRepository).downloadFilesFromRelease(tt.args.release, tt.args.fileName) + got, err := gRepo.(*gitHubRepository).downloadFilesFromRelease(context.Background(), tt.args.release, tt.args.fileName) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(Equal(tt.want)) }) } } -func testMethod(t *testing.T, r *http.Request, want string) { - t.Helper() - - if got := r.Method; got != want { - t.Errorf("Request method: %v, want %v", got, want) - } -} - // resetCaches is called repeatedly throughout tests to help avoid cross-test pollution. func resetCaches() { cacheVersions = map[string][]string{} @@ -859,20 +976,135 @@ func resetCaches() { cacheFiles = map[string][]byte{} } -// newFakeGoproxy sets up a test HTTP server along with a github.Client that is -// configured to talk to that test server. Tests should register handlers on -// mux which provide mock responses for the API method being tested. -func newFakeGoproxy() (client *goproxy.Client, mux *http.ServeMux, teardown func()) { - // mux is the HTTP request multiplexer used with the test server. 
- mux = http.NewServeMux() +func Test_gitHubRepository_releaseNotFound(t *testing.T) { + retryableOperationInterval = 200 * time.Millisecond + retryableOperationTimeout = 1 * time.Second + + tests := []struct { + name string + releaseTags []string + ghReleases []string + want string + wantErr bool + }{ + { + name: "One release", + releaseTags: []string{"v0.4.2"}, + ghReleases: []string{"v0.4.2"}, + want: "v0.4.2", + wantErr: false, + }, + { + name: "Latest tag without a release", + releaseTags: []string{"v0.5.0", "v0.4.2"}, + ghReleases: []string{"v0.4.2"}, + want: "v0.4.2", + wantErr: false, + }, + { + name: "Two tags without releases", + releaseTags: []string{"v0.6.0", "v0.5.0", "v0.4.2"}, + ghReleases: []string{"v0.4.2"}, + want: "v0.4.2", + wantErr: false, + }, + { + name: "Five tags without releases", + releaseTags: []string{"v0.9.0", "v0.8.0", "v0.7.0", "v0.6.0", "v0.5.0", "v0.4.2"}, + ghReleases: []string{"v0.4.2"}, + wantErr: true, + }, + { + name: "Pre-releases have lower priority", + releaseTags: []string{"v0.7.0-alpha", "v0.6.0-alpha", "v0.5.0-alpha", "v0.4.2"}, + ghReleases: []string{"v0.4.2"}, + want: "v0.4.2", + wantErr: false, + }, + { + name: "Two Github releases", + releaseTags: []string{"v0.7.0", "v0.6.0", "v0.5.0", "v0.4.2"}, + ghReleases: []string{"v0.5.0", "v0.4.2"}, + want: "v0.5.0", + wantErr: false, + }, + { + name: "Github release and prerelease", + releaseTags: []string{"v0.6.0", "v0.5.0-alpha", "v0.4.2"}, + ghReleases: []string{"v0.5.0-alpha", "v0.4.2"}, + want: "v0.4.2", + wantErr: false, + }, + { + name: "No Github releases", + releaseTags: []string{"v0.6.0", "v0.5.0", "v0.4.2"}, + ghReleases: []string{}, + wantErr: true, + }, + { + name: "Pre-releases only", + releaseTags: []string{"v0.6.0-alpha", "v0.5.0-alpha", "v0.4.2-alpha"}, + ghReleases: []string{"v0.5.0-alpha"}, + want: "v0.5.0-alpha", + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + ctx := context.Background() - apiHandler := http.NewServeMux() - apiHandler.Handle("/", mux) + configVariablesClient := test.NewFakeVariableClient() - // server is a test HTTP server used to provide mock API responses. - server := httptest.NewServer(apiHandler) + resetCaches() - // client is the GitHub client being tested and is configured to use test server. - url, _ := url.Parse(server.URL + "/") - return goproxy.NewClient(url.Scheme, url.Host), mux, server.Close + client, mux, teardown := test.NewFakeGitHub() + defer teardown() + + providerConfig := config.NewProvider("test", "https://github.com/o/r1/releases/v0.4.1/file.yaml", clusterctlv1.CoreProviderType) + + scheme, host, muxGoproxy, teardownGoproxy := goproxytest.NewFakeGoproxy() + clientGoproxy := goproxy.NewClient(scheme, host) + + defer teardownGoproxy() + + // First, register tags within goproxy. + muxGoproxy.HandleFunc("/github.com/o/r1/@v/list", func(w http.ResponseWriter, r *http.Request) { + goproxytest.HTTPTestMethod(t, r, "GET") + for _, release := range tt.releaseTags { + fmt.Fprint(w, release+"\n") + } + }) + + // Second, register releases in GitHub. 
+ for _, release := range tt.ghReleases { + mux.HandleFunc(fmt.Sprintf("/repos/o/r1/releases/tags/%s", release), func(w http.ResponseWriter, r *http.Request) { + goproxytest.HTTPTestMethod(t, r, "GET") + parts := strings.Split(r.RequestURI, "/") + version := parts[len(parts)-1] + fmt.Fprintf(w, "{\"id\":13, \"tag_name\": %q, \"assets\": [{\"id\": 1, \"name\": \"metadata.yaml\"}] }", version) + }) + } + + // Third, setup a handler for returning a fake release metadata file. + mux.HandleFunc("/repos/o/r1/releases/assets/1", func(w http.ResponseWriter, r *http.Request) { + goproxytest.HTTPTestMethod(t, r, "GET") + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Disposition", "attachment; filename=metadata.yaml") + fmt.Fprint(w, "apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3\nreleaseSeries:\n - major: 0\n minor: 4\n contract: v1alpha4\n - major: 0\n minor: 5\n contract: v1alpha4\n - major: 0\n minor: 3\n contract: v1alpha3\n") + }) + + gRepo, err := NewGitHubRepository(ctx, providerConfig, configVariablesClient, injectGithubClient(client), injectGoproxyClient(clientGoproxy)) + g.Expect(err).ToNot(HaveOccurred()) + + got, err := latestRelease(ctx, gRepo) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + return + } + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).To(Equal(tt.want)) + }) + } } diff --git a/cmd/clusterctl/client/repository/repository_gitlab.go b/cmd/clusterctl/client/repository/repository_gitlab.go index 45265b56e721..9657e357d080 100644 --- a/cmd/clusterctl/client/repository/repository_gitlab.go +++ b/cmd/clusterctl/client/repository/repository_gitlab.go @@ -117,7 +117,7 @@ func (g *gitLabRepository) DefaultVersion() string { } // GetVersions returns the list of versions that are available in a provider repository. -func (g *gitLabRepository) GetVersions() ([]string, error) { +func (g *gitLabRepository) GetVersions(_ context.Context) ([]string, error) { // FIXME Get versions from GitLab API return []string{g.defaultVersion}, nil } @@ -133,8 +133,7 @@ func (g *gitLabRepository) ComponentsPath() string { } // GetFile returns a file for a given provider version. -func (g *gitLabRepository) GetFile(version, path string) ([]byte, error) { - ctx := context.TODO() +func (g *gitLabRepository) GetFile(ctx context.Context, version, path string) ([]byte, error) { url := fmt.Sprintf( "https://%s/api/v4/projects/%s/packages/generic/%s/%s/%s", g.host, diff --git a/cmd/clusterctl/client/repository/repository_gitlab_test.go b/cmd/clusterctl/client/repository/repository_gitlab_test.go index 5e8a8bb61500..8844084dae5c 100644 --- a/cmd/clusterctl/client/repository/repository_gitlab_test.go +++ b/cmd/clusterctl/client/repository/repository_gitlab_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package repository import ( + "context" "fmt" "net/http" "net/http/httptest" @@ -27,6 +28,7 @@ import ( clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/config" "sigs.k8s.io/cluster-api/cmd/clusterctl/internal/test" + goproxytest "sigs.k8s.io/cluster-api/internal/goproxy/test" ) func Test_gitLabRepository_newGitLabRepository(t *testing.T) { @@ -135,7 +137,7 @@ func Test_gitLabRepository_newGitLabRepository(t *testing.T) { return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(gitLab).To(Equal(tt.want)) }) } @@ -151,7 +153,7 @@ func Test_gitLabRepository_getFile(t *testing.T) { providerConfig := config.NewProvider("test", providerURL, clusterctlv1.CoreProviderType) mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - testMethod(t, r, "GET") + goproxytest.HTTPTestMethod(t, r, "GET") if r.URL.RawPath == "/api/v4/projects/group%2Fproject/packages/generic/my-package/v0.4.1/file.yaml" { w.Header().Set("Content-Type", "application/octet-stream") w.Header().Set("Content-Disposition", "attachment; filename=file.yaml") @@ -193,15 +195,15 @@ func Test_gitLabRepository_getFile(t *testing.T) { gitLab, err := NewGitLabRepository(providerConfig, configVariablesClient) gitLab.(*gitLabRepository).httpClient = client - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) - got, err := gitLab.GetFile(tt.version, tt.fileName) + got, err := gitLab.GetFile(context.Background(), tt.version, tt.fileName) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(Equal(tt.want)) }) } diff --git a/cmd/clusterctl/client/repository/repository_local.go b/cmd/clusterctl/client/repository/repository_local.go index 906bdb2c52ab..16ced216f888 100644 --- a/cmd/clusterctl/client/repository/repository_local.go +++ b/cmd/clusterctl/client/repository/repository_local.go @@ -17,6 +17,7 @@ limitations under the License. package repository import ( + "context" "net/url" "os" "path/filepath" @@ -82,11 +83,11 @@ func (r *localRepository) ComponentsPath() string { } // GetFile returns a file for a given provider version. -func (r *localRepository) GetFile(version, fileName string) ([]byte, error) { +func (r *localRepository) GetFile(ctx context.Context, version, fileName string) ([]byte, error) { var err error if version == latestVersionTag { - version, err = latestRelease(r) + version, err = latestRelease(ctx, r) if err != nil { return nil, errors.Wrapf(err, "failed to get the latest release") } @@ -111,7 +112,7 @@ func (r *localRepository) GetFile(version, fileName string) ([]byte, error) { } // GetVersions returns the list of versions that are available for a local repository. -func (r *localRepository) GetVersions() ([]string, error) { +func (r *localRepository) GetVersions(_ context.Context) ([]string, error) { // get all the sub-directories under {basepath}/{provider-id}/ releasesPath := filepath.Join(r.basepath, r.providerLabel) files, err := os.ReadDir(releasesPath) @@ -135,7 +136,7 @@ func (r *localRepository) GetVersions() ([]string, error) { } // newLocalRepository returns a new localRepository. 
-func newLocalRepository(providerConfig config.Provider, configVariablesClient config.VariablesClient) (*localRepository, error) { +func newLocalRepository(ctx context.Context, providerConfig config.Provider, configVariablesClient config.VariablesClient) (*localRepository, error) { url, err := url.Parse(providerConfig.URL()) if err != nil { return nil, errors.Wrap(err, "invalid url") @@ -189,7 +190,7 @@ func newLocalRepository(providerConfig config.Provider, configVariablesClient co } if defaultVersion == latestVersionTag { - repo.defaultVersion, err = latestContractRelease(repo, clusterv1.GroupVersion.Version) + repo.defaultVersion, err = latestContractRelease(ctx, repo, clusterv1.GroupVersion.Version) if err != nil { return nil, errors.Wrap(err, "failed to get latest version") } diff --git a/cmd/clusterctl/client/repository/repository_local_test.go b/cmd/clusterctl/client/repository/repository_local_test.go index 18f41b6caa5c..0c6234ce9d05 100644 --- a/cmd/clusterctl/client/repository/repository_local_test.go +++ b/cmd/clusterctl/client/repository/repository_local_test.go @@ -17,6 +17,7 @@ limitations under the License. package repository import ( + "context" "os" "path/filepath" "testing" @@ -28,6 +29,10 @@ import ( "sigs.k8s.io/cluster-api/cmd/clusterctl/internal/test" ) +const ( + metadataContents = "apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3\nreleaseSeries:\n - major: 0\n minor: 4\n contract: v1alpha4\n - major: 0\n minor: 5\n contract: v1alpha4\n - major: 0\n minor: 3\n contract: v1alpha3\n" +) + func Test_localRepository_newLocalRepository(t *testing.T) { type fields struct { provider config.Provider @@ -108,12 +113,12 @@ func Test_localRepository_newLocalRepository(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - got, err := newLocalRepository(tt.fields.provider, tt.fields.configVariablesClient) + got, err := newLocalRepository(context.Background(), tt.fields.provider, tt.fields.configVariablesClient) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got.basepath).To(Equal(tt.want.basepath)) g.Expect(got.providerLabel).To(Equal(tt.want.providerLabel)) @@ -156,6 +161,7 @@ func Test_localRepository_newLocalRepository_Latest(t *testing.T) { // Create several release directories createLocalTestProviderFile(t, tmpDir, "bootstrap-foo/v1.0.0/bootstrap-components.yaml", "foo: bar") createLocalTestProviderFile(t, tmpDir, "bootstrap-foo/v1.0.1/bootstrap-components.yaml", "foo: bar") + createLocalTestProviderFile(t, tmpDir, "bootstrap-foo/v1.0.1/metadata.yaml", metadataContents) createLocalTestProviderFile(t, tmpDir, "bootstrap-foo/v2.0.0-alpha.0/bootstrap-components.yaml", "foo: bar") createLocalTestProviderFile(t, tmpDir, "bootstrap-foo/Foo.Bar/bootstrap-components.yaml", "foo: bar") createLocalTestProviderFile(t, tmpDir, "bootstrap-foo/foo.file", "foo: bar") @@ -165,8 +171,8 @@ func Test_localRepository_newLocalRepository_Latest(t *testing.T) { p2URLLatestAbs := filepath.Join(tmpDir, p2URLLatest) p2 := config.NewProvider("foo", p2URLLatestAbs, clusterctlv1.BootstrapProviderType) - got, err := newLocalRepository(p2, test.NewFakeVariableClient()) - g.Expect(err).NotTo(HaveOccurred()) + got, err := newLocalRepository(context.Background(), p2, test.NewFakeVariableClient()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got.basepath).To(Equal(tmpDir)) g.Expect(got.providerLabel).To(Equal("bootstrap-foo")) @@ -184,8 +190,10 @@ func Test_localRepository_GetFile(t *testing.T) { 
p1 := config.NewProvider("foo", dst1, clusterctlv1.BootstrapProviderType) // Provider 2: URL is for the latest release + createLocalTestProviderFile(t, tmpDir, "bootstrap-baz/v1.0.0-alpha.0/metadata.yaml", metadataContents) createLocalTestProviderFile(t, tmpDir, "bootstrap-bar/v1.0.0/bootstrap-components.yaml", "version: v1.0.0") createLocalTestProviderFile(t, tmpDir, "bootstrap-bar/v1.0.1/bootstrap-components.yaml", "version: v1.0.1") + createLocalTestProviderFile(t, tmpDir, "bootstrap-bar/v1.0.1/metadata.yaml", metadataContents) createLocalTestProviderFile(t, tmpDir, "bootstrap-bar/v2.0.0-alpha.0/bootstrap-components.yaml", "version: v2.0.0-alpha.0") createLocalTestProviderFile(t, tmpDir, "bootstrap-bar/Foo.Bar/bootstrap-components.yaml", "version: Foo.Bar") createLocalTestProviderFile(t, tmpDir, "bootstrap-bar/foo.file", "foo: bar") @@ -295,16 +303,16 @@ func Test_localRepository_GetFile(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - r, err := newLocalRepository(tt.fields.provider, tt.fields.configVariablesClient) - g.Expect(err).NotTo(HaveOccurred()) + r, err := newLocalRepository(context.Background(), tt.fields.provider, tt.fields.configVariablesClient) + g.Expect(err).ToNot(HaveOccurred()) - got, err := r.GetFile(tt.args.version, tt.args.fileName) + got, err := r.GetFile(context.Background(), tt.args.version, tt.args.fileName) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(string(got)).To(Equal(tt.want.contents)) }) } @@ -322,6 +330,7 @@ func Test_localRepository_GetVersions(t *testing.T) { createLocalTestProviderFile(t, tmpDir, "bootstrap-bar/v1.0.0/bootstrap-components.yaml", "version: v1.0.0") createLocalTestProviderFile(t, tmpDir, "bootstrap-bar/v1.0.1/bootstrap-components.yaml", "version: v1.0.1") createLocalTestProviderFile(t, tmpDir, "bootstrap-bar/v2.0.1/bootstrap-components.yaml", "version: v2.0.1") + createLocalTestProviderFile(t, tmpDir, "bootstrap-bar/v2.0.2+exp.sha.5114f85/metadata.yaml", metadataContents) createLocalTestProviderFile(t, tmpDir, "bootstrap-bar/v2.0.2+exp.sha.5114f85/bootstrap-components.yaml", "version: v2.0.2+exp.sha.5114f85") createLocalTestProviderFile(t, tmpDir, "bootstrap-bar/v2.0.3-alpha/bootstrap-components.yaml", "version: v2.0.3-alpha") createLocalTestProviderFile(t, tmpDir, "bootstrap-bar/Foo.Bar/bootstrap-components.yaml", "version: Foo.Bar") @@ -370,15 +379,17 @@ func Test_localRepository_GetVersions(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - r, err := newLocalRepository(tt.fields.provider, tt.fields.configVariablesClient) - g.Expect(err).NotTo(HaveOccurred()) + ctx := context.Background() + + r, err := newLocalRepository(ctx, tt.fields.provider, tt.fields.configVariablesClient) + g.Expect(err).ToNot(HaveOccurred()) - got, err := r.GetVersions() + got, err := r.GetVersions(ctx) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(ConsistOf(tt.want.versions)) }) diff --git a/cmd/clusterctl/client/repository/repository_memory.go b/cmd/clusterctl/client/repository/repository_memory.go index 2d340a7e7c1f..e1549ebb6b87 100644 --- a/cmd/clusterctl/client/repository/repository_memory.go +++ b/cmd/clusterctl/client/repository/repository_memory.go @@ -17,6 +17,7 @@ limitations under the License. 
package repository import ( + "context" "fmt" "github.com/pkg/errors" @@ -73,13 +74,13 @@ func (f *MemoryRepository) ComponentsPath() string { // GetFile returns a file for a given provider version. // NOTE: If the provided version is missing, the default version is used. -func (f *MemoryRepository) GetFile(version string, path string) ([]byte, error) { +func (f *MemoryRepository) GetFile(ctx context.Context, version string, path string) ([]byte, error) { if version == "" { version = f.DefaultVersion() } if version == latestVersionTag { var err error - version, err = latestContractRelease(f, clusterv1.GroupVersion.Version) + version, err = latestContractRelease(ctx, f, clusterv1.GroupVersion.Version) if err != nil { return nil, err } @@ -97,7 +98,7 @@ func (f *MemoryRepository) GetFile(version string, path string) ([]byte, error) } // GetVersions returns the list of versions that are available. -func (f *MemoryRepository) GetVersions() ([]string, error) { +func (f *MemoryRepository) GetVersions(_ context.Context) ([]string, error) { v := make([]string, 0, len(f.versions)) for k := range f.versions { v = append(v, k) diff --git a/cmd/clusterctl/client/repository/repository_memory_test.go b/cmd/clusterctl/client/repository/repository_memory_test.go index aac885f1719c..baa7828262eb 100644 --- a/cmd/clusterctl/client/repository/repository_memory_test.go +++ b/cmd/clusterctl/client/repository/repository_memory_test.go @@ -17,6 +17,7 @@ limitations under the License. package repository import ( + "context" "testing" . "github.com/onsi/gomega" @@ -124,15 +125,17 @@ releaseSeries: t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + r := tt.repo g.Expect(r.RootPath()).To(Equal("")) - g.Expect(r.GetFile(r.DefaultVersion(), r.ComponentsPath())).To(Equal(tt.want.defaultVersion)) - g.Expect(r.GetFile("", r.ComponentsPath())).To(Equal(tt.want.defaultVersion)) - g.Expect(r.GetFile("latest", r.ComponentsPath())).To(Equal(tt.want.latestVersion)) + g.Expect(r.GetFile(ctx, r.DefaultVersion(), r.ComponentsPath())).To(Equal(tt.want.defaultVersion)) + g.Expect(r.GetFile(ctx, "", r.ComponentsPath())).To(Equal(tt.want.defaultVersion)) + g.Expect(r.GetFile(ctx, "latest", r.ComponentsPath())).To(Equal(tt.want.latestVersion)) - got, err := r.GetVersions() - g.Expect(err).NotTo(HaveOccurred()) + got, err := r.GetVersions(ctx) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(ConsistOf(tt.want.versions)) }) } diff --git a/cmd/clusterctl/client/repository/repository_versions.go b/cmd/clusterctl/client/repository/repository_versions.go index f4b710408d47..599fb942f457 100644 --- a/cmd/clusterctl/client/repository/repository_versions.go +++ b/cmd/clusterctl/client/repository/repository_versions.go @@ -17,6 +17,9 @@ limitations under the License. package repository import ( + "context" + "sort" + "github.com/pkg/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -32,18 +35,23 @@ const ( // latestContractRelease returns the latest patch release for a repository for the current API contract, according to // semantic version order of the release tag name. 
-func latestContractRelease(repo Repository, contract string) (string, error) { - latest, err := latestRelease(repo) +func latestContractRelease(ctx context.Context, repo Repository, contract string) (string, error) { + latest, err := latestRelease(ctx, repo) if err != nil { return latest, err } // Attempt to check if the latest release satisfies the API Contract // This is a best-effort attempt to find the latest release for an older API contract if it's not the latest release. + file, err := repo.GetFile(ctx, latest, metadataFile) // If an error occurs, we just return the latest release. - file, err := repo.GetFile(latest, metadataFile) if err != nil { + if errors.Is(err, errNotFound) { + // If it was ErrNotFound, then there is no release yet for the resolved tag. + // Ref: https://github.com/kubernetes-sigs/cluster-api/issues/7889 + return "", err + } // if we can't get the metadata file from the release, we return latest. - return latest, nil //nolint:nilerr + return latest, nil } latestMetadata := &clusterctlv1.Metadata{} codecFactory := serializer.NewCodecFactory(scheme.Scheme) @@ -64,31 +72,27 @@ func latestContractRelease(repo Repository, contract string) (string, error) { // If the Major or Minor version of the latest release doesn't match the release series for the current contract, // return the latest patch release of the desired Major/Minor version. if sv.Major() != releaseSeries.Major || sv.Minor() != releaseSeries.Minor { - return latestPatchRelease(repo, &releaseSeries.Major, &releaseSeries.Minor) + return latestPatchRelease(ctx, repo, &releaseSeries.Major, &releaseSeries.Minor) } return latest, nil } // latestRelease returns the latest release for a repository, according to // semantic version order of the release tag name. -func latestRelease(repo Repository) (string, error) { - return latestPatchRelease(repo, nil, nil) +func latestRelease(ctx context.Context, repo Repository) (string, error) { + return latestPatchRelease(ctx, repo, nil, nil) } // latestPatchRelease returns the latest patch release for a given Major and Minor version. -func latestPatchRelease(repo Repository, major, minor *uint) (string, error) { - versions, err := repo.GetVersions() +func latestPatchRelease(ctx context.Context, repo Repository, major, minor *uint) (string, error) { + versions, err := repo.GetVersions(ctx) if err != nil { return "", errors.Wrapf(err, "failed to get repository versions") } // Search for the latest release according to semantic version ordering. // Releases with tag name that are not in semver format are ignored. 
-	var latestTag string
-	var latestPrereleaseTag string
-
-	var latestReleaseVersion *version.Version
-	var latestPrereleaseVersion *version.Version
+	versionCandidates := []*version.Version{}
 
 	for _, v := range versions {
 		sv, err := version.ParseSemantic(v)
@@ -102,28 +106,48 @@ func latestPatchRelease(repo Repository, major, minor *uint) (string, error) {
 			continue
 		}
 
-		// track prereleases separately
-		if sv.PreRelease() != "" {
-			if latestPrereleaseVersion == nil || latestPrereleaseVersion.LessThan(sv) {
-				latestPrereleaseTag = v
-				latestPrereleaseVersion = sv
-			}
-			continue
-		}
+		versionCandidates = append(versionCandidates, sv)
+	}
 
-		if latestReleaseVersion == nil || latestReleaseVersion.LessThan(sv) {
-			latestTag = v
-			latestReleaseVersion = sv
-		}
+	if len(versionCandidates) == 0 {
+		return "", errors.New("failed to find releases tagged with a valid semantic version number")
 	}
 
-	// Fall back to returning latest prereleases if no release has been cut or bail if it's also empty
-	if latestTag == "" {
-		if latestPrereleaseTag == "" {
-			return "", errors.New("failed to find releases tagged with a valid semantic version number")
+	// Sort parsed versions by semantic version order.
+	sort.SliceStable(versionCandidates, func(i, j int) bool {
+		// Prioritize release versions over pre-releases. For example v1.0.0 > v2.0.0-alpha.
+		// If both are pre-releases, sort by semantic version order as usual.
+		if versionCandidates[j].PreRelease() == "" && versionCandidates[i].PreRelease() != "" {
+			return false
 		}
+		if versionCandidates[i].PreRelease() == "" && versionCandidates[j].PreRelease() != "" {
+			return true
+		}
+
+		return versionCandidates[j].LessThan(versionCandidates[i])
+	})
+
+	// Limit the number of searchable versions to 5.
+	versionCandidates = versionCandidates[:min(5, len(versionCandidates))]
 
-		return latestPrereleaseTag, nil
+	for _, v := range versionCandidates {
+		// Iterate through sorted versions and try to fetch a file from that release.
+		// If it completes successfully, we have found the latest release.
+		// Note: the fetched file will be cached and next time we will get it from the cache.
+		versionString := "v" + v.String()
+		_, err := repo.GetFile(ctx, versionString, metadataFile)
+		if err != nil {
+			if errors.Is(err, errNotFound) {
+				// Ignore this version.
+				continue
+			}
+
+			return "", err
+		}
+
+		return versionString, nil
 	}
-	return latestTag, nil
+
+	// If we reached this point, it means we didn't find any release.
+	return "", errors.New("failed to find releases tagged with a valid semantic version number")
 }
diff --git a/cmd/clusterctl/client/repository/template_client.go b/cmd/clusterctl/client/repository/template_client.go
index 26b50c60e74e..ea6a7d8a41d4 100644
--- a/cmd/clusterctl/client/repository/template_client.go
+++ b/cmd/clusterctl/client/repository/template_client.go
@@ -17,6 +17,8 @@ limitations under the License.
 package repository
 
 import (
+	"context"
+
 	"github.com/pkg/errors"
 
 	"sigs.k8s.io/cluster-api/cmd/clusterctl/client/config"
@@ -27,7 +29,7 @@ import (
 // TemplateClient has methods to work with cluster templates hosted on a provider repository.
 // Templates are YAML files to be used for creating a guest cluster.
 type TemplateClient interface {
-	Get(flavor, targetNamespace string, listVariablesOnly bool) (Template, error)
+	Get(ctx context.Context, flavor, targetNamespace string, listVariablesOnly bool) (Template, error)
 }
 
 // templateClient implements TemplateClient.
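To make the custom ordering in latestPatchRelease concrete, the sketch below sorts a few sample tags with the same comparator: stable releases first (newest to oldest), then pre-releases (newest to oldest). It relies only on k8s.io/apimachinery's version package, as the function above does.

package main

import (
	"fmt"
	"sort"

	"k8s.io/apimachinery/pkg/util/version"
)

func main() {
	tags := []string{"v2.0.0-alpha", "v1.0.0", "v0.9.0", "v1.1.0-beta.1"}

	parsed := make([]*version.Version, 0, len(tags))
	for _, t := range tags {
		v, err := version.ParseSemantic(t)
		if err != nil {
			continue // Tags that are not valid semantic versions are ignored.
		}
		parsed = append(parsed, v)
	}

	sort.SliceStable(parsed, func(i, j int) bool {
		// A stable release always outranks a pre-release, regardless of its number.
		if parsed[j].PreRelease() == "" && parsed[i].PreRelease() != "" {
			return false
		}
		if parsed[i].PreRelease() == "" && parsed[j].PreRelease() != "" {
			return true
		}
		return parsed[j].LessThan(parsed[i])
	})

	for _, v := range parsed {
		fmt.Println("v" + v.String())
	}
	// Prints: v1.0.0, v0.9.0, v2.0.0-alpha, v1.1.0-beta.1
}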
@@ -66,7 +68,7 @@ func newTemplateClient(input TemplateClientInput) *templateClient {
 // Get returns the template for the specified flavor.
 // In case the template does not exist, an error is returned.
 // Get assumes the following naming convention for templates: cluster-template[-<flavor_name>].yaml.
-func (c *templateClient) Get(flavor, targetNamespace string, skipTemplateProcess bool) (Template, error) {
+func (c *templateClient) Get(ctx context.Context, flavor, targetNamespace string, skipTemplateProcess bool) (Template, error) {
 	log := logf.Log
 
 	if targetNamespace == "" {
@@ -88,13 +90,13 @@ func (c *templateClient) Get(flavor, targetNamespace string, skipTemplateProcess
 	}
 
 	if rawArtifact == nil {
-		log.V(5).Info("Fetching", "File", name, "Provider", c.provider.Name(), "Type", c.provider.Type(), "Version", version)
-		rawArtifact, err = c.repository.GetFile(version, name)
+		log.V(5).Info("Fetching", "file", name, "provider", c.provider.Name(), "type", c.provider.Type(), "version", version)
+		rawArtifact, err = c.repository.GetFile(ctx, version, name)
 		if err != nil {
 			return nil, errors.Wrapf(err, "failed to read %q from provider's repository %q", name, c.provider.ManifestLabel())
 		}
 	} else {
-		log.V(1).Info("Using", "Override", name, "Provider", c.provider.ManifestLabel(), "Version", version)
+		log.V(1).Info("Using", "override", name, "provider", c.provider.ManifestLabel(), "version", version)
 	}
 
 	return NewTemplate(TemplateInput{
diff --git a/cmd/clusterctl/client/repository/template_client_test.go b/cmd/clusterctl/client/repository/template_client_test.go
index 2f08c7cb184d..34bd6fa2f87a 100644
--- a/cmd/clusterctl/client/repository/template_client_test.go
+++ b/cmd/clusterctl/client/repository/template_client_test.go
@@ -17,6 +17,7 @@ limitations under the License.
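The naming convention referenced in templateClient.Get maps a flavor to a file name in the provider repository. A sketch of that mapping; the helper name is hypothetical, since clusterctl computes the name inline.

package main

import "fmt"

// templateName returns the expected file name for a cluster template flavor.
func templateName(flavor string) string {
	if flavor == "" {
		return "cluster-template.yaml"
	}
	return fmt.Sprintf("cluster-template-%s.yaml", flavor)
}

func main() {
	fmt.Println(templateName(""))         // cluster-template.yaml
	fmt.Println(templateName("external")) // cluster-template-external.yaml
}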
package repository import ( + "context" "fmt" "testing" @@ -193,19 +194,19 @@ func Test_templates_Get(t *testing.T) { processor: tt.fields.processor, }, ) - got, err := f.Get(tt.args.flavor, tt.args.targetNamespace, tt.args.listVariablesOnly) + got, err := f.Get(context.Background(), tt.args.flavor, tt.args.targetNamespace, tt.args.listVariablesOnly) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got.Variables()).To(Equal(tt.want.variables)) g.Expect(got.TargetNamespace()).To(Equal(tt.want.targetNamespace)) // check variable replaced in yaml yaml, err := got.Yaml() - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) if !tt.args.listVariablesOnly { g.Expect(yaml).To(ContainSubstring(fmt.Sprintf("variable: %s", variableValue))) diff --git a/cmd/clusterctl/client/repository/template_test.go b/cmd/clusterctl/client/repository/template_test.go index e44793411e08..242979bc8b09 100644 --- a/cmd/clusterctl/client/repository/template_test.go +++ b/cmd/clusterctl/client/repository/template_test.go @@ -100,7 +100,7 @@ func Test_newTemplate(t *testing.T) { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got.Variables()).To(Equal(tt.want.variables)) g.Expect(got.TargetNamespace()).To(Equal(tt.want.targetNamespace)) @@ -111,7 +111,7 @@ func Test_newTemplate(t *testing.T) { // check variable replaced in components yml, err := got.Yaml() - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(yml).To(ContainSubstring(fmt.Sprintf("variable: %s", variableValue))) }) } @@ -153,7 +153,7 @@ metadata: } merged, err := MergeTemplates(template1, template2) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(merged.Objs()).To(HaveLen(2)) g.Expect(merged.VariableMap()).To(HaveLen(3)) diff --git a/cmd/clusterctl/client/rollout.go b/cmd/clusterctl/client/rollout.go index c69ef2edab4b..d30dc3665294 100644 --- a/cmd/clusterctl/client/rollout.go +++ b/cmd/clusterctl/client/rollout.go @@ -17,6 +17,7 @@ limitations under the License. 
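The rollout.go diff that follows threads the caller's context through every rollout verb (restart, pause, resume, undo). A rough library-side sketch of the new call shape; the kubeconfig path is illustrative, and the option fields that select which resources to restart are elided.

```go
package example

import (
	"context"

	"sigs.k8s.io/cluster-api/cmd/clusterctl/client"
)

// restartAll sketches a library caller of the context-aware rollout API.
func restartAll(ctx context.Context) error {
	// An empty config path is assumed to fall back to the default clusterctl config.
	c, err := client.New(ctx, "")
	if err != nil {
		return err
	}

	return c.RolloutRestart(ctx, client.RolloutRestartOptions{
		Kubeconfig: client.Kubeconfig{Path: "cluster1"}, // illustrative
	})
}
```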
package client import ( + "context" "fmt" "strings" @@ -85,7 +86,7 @@ type RolloutUndoOptions struct { ToRevision int64 } -func (c *clusterctlClient) RolloutRestart(options RolloutRestartOptions) error { +func (c *clusterctlClient) RolloutRestart(ctx context.Context, options RolloutRestartOptions) error { clusterClient, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: options.Kubeconfig}) if err != nil { return err @@ -95,14 +96,14 @@ func (c *clusterctlClient) RolloutRestart(options RolloutRestartOptions) error { return err } for _, ref := range objRefs { - if err := c.alphaClient.Rollout().ObjectRestarter(clusterClient.Proxy(), ref); err != nil { + if err := c.alphaClient.Rollout().ObjectRestarter(ctx, clusterClient.Proxy(), ref); err != nil { return err } } return nil } -func (c *clusterctlClient) RolloutPause(options RolloutPauseOptions) error { +func (c *clusterctlClient) RolloutPause(ctx context.Context, options RolloutPauseOptions) error { clusterClient, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: options.Kubeconfig}) if err != nil { return err @@ -112,14 +113,14 @@ func (c *clusterctlClient) RolloutPause(options RolloutPauseOptions) error { return err } for _, ref := range objRefs { - if err := c.alphaClient.Rollout().ObjectPauser(clusterClient.Proxy(), ref); err != nil { + if err := c.alphaClient.Rollout().ObjectPauser(ctx, clusterClient.Proxy(), ref); err != nil { return err } } return nil } -func (c *clusterctlClient) RolloutResume(options RolloutResumeOptions) error { +func (c *clusterctlClient) RolloutResume(ctx context.Context, options RolloutResumeOptions) error { clusterClient, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: options.Kubeconfig}) if err != nil { return err @@ -129,14 +130,14 @@ func (c *clusterctlClient) RolloutResume(options RolloutResumeOptions) error { return err } for _, ref := range objRefs { - if err := c.alphaClient.Rollout().ObjectResumer(clusterClient.Proxy(), ref); err != nil { + if err := c.alphaClient.Rollout().ObjectResumer(ctx, clusterClient.Proxy(), ref); err != nil { return err } } return nil } -func (c *clusterctlClient) RolloutUndo(options RolloutUndoOptions) error { +func (c *clusterctlClient) RolloutUndo(ctx context.Context, options RolloutUndoOptions) error { clusterClient, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: options.Kubeconfig}) if err != nil { return err @@ -146,7 +147,7 @@ func (c *clusterctlClient) RolloutUndo(options RolloutUndoOptions) error { return err } for _, ref := range objRefs { - if err := c.alphaClient.Rollout().ObjectRollbacker(clusterClient.Proxy(), ref, options.ToRevision); err != nil { + if err := c.alphaClient.Rollout().ObjectRollbacker(ctx, clusterClient.Proxy(), ref, options.ToRevision); err != nil { return err } } diff --git a/cmd/clusterctl/client/rollout_test.go b/cmd/clusterctl/client/rollout_test.go index 00a7a77d26e7..01f36f23e538 100644 --- a/cmd/clusterctl/client/rollout_test.go +++ b/cmd/clusterctl/client/rollout_test.go @@ -17,6 +17,7 @@ limitations under the License. package client import ( + "context" "testing" . "github.com/onsi/gomega" @@ -51,7 +52,10 @@ func fakeClientForRollout() *fakeClient { Name: "md-2", }, } - config1 := newFakeConfig(). + + ctx := context.Background() + + config1 := newFakeConfig(ctx). WithProvider(core). WithProvider(infra) @@ -61,7 +65,7 @@ func fakeClientForRollout() *fakeClient { WithObjs(md1). WithObjs(md2) - client := newFakeClient(config1). 
+ client := newFakeClient(ctx, config1). WithCluster(cluster1) return client @@ -169,12 +173,14 @@ func Test_clusterctlClient_RolloutRestart(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - err := tt.fields.client.RolloutRestart(tt.args.options) + ctx := context.Background() + + err := tt.fields.client.RolloutRestart(ctx, tt.args.options) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) }) } } @@ -252,12 +258,14 @@ func Test_clusterctlClient_RolloutPause(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - err := tt.fields.client.RolloutPause(tt.args.options) + ctx := context.Background() + + err := tt.fields.client.RolloutPause(ctx, tt.args.options) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) }) } } @@ -335,12 +343,14 @@ func Test_clusterctlClient_RolloutResume(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - err := tt.fields.client.RolloutResume(tt.args.options) + ctx := context.Background() + + err := tt.fields.client.RolloutResume(ctx, tt.args.options) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) }) } } diff --git a/cmd/clusterctl/client/topology.go b/cmd/clusterctl/client/topology.go index 8e65bb3547db..648f7a92aa89 100644 --- a/cmd/clusterctl/client/topology.go +++ b/cmd/clusterctl/client/topology.go @@ -17,6 +17,8 @@ limitations under the License. package client import ( + "context" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster" @@ -46,13 +48,13 @@ type TopologyPlanOutput = cluster.TopologyPlanOutput // TopologyPlan performs a dry run execution of the topology reconciler using the given inputs. // It returns a summary of the changes observed during the execution. -func (c *clusterctlClient) TopologyPlan(options TopologyPlanOptions) (*TopologyPlanOutput, error) { +func (c *clusterctlClient) TopologyPlan(ctx context.Context, options TopologyPlanOptions) (*TopologyPlanOutput, error) { clusterClient, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: options.Kubeconfig}) if err != nil { return nil, err } - out, err := clusterClient.Topology().Plan(&cluster.TopologyPlanInput{ + out, err := clusterClient.Topology().Plan(ctx, &cluster.TopologyPlanInput{ Objs: options.Objs, TargetClusterName: options.Cluster, TargetNamespace: options.Namespace, diff --git a/cmd/clusterctl/client/tree/annotations.go b/cmd/clusterctl/client/tree/annotations.go index 18a085bab9ea..afb18cebb127 100644 --- a/cmd/clusterctl/client/tree/annotations.go +++ b/cmd/clusterctl/client/tree/annotations.go @@ -51,7 +51,7 @@ const ( GroupItemsSeparator = ", " // ObjectZOrderAnnotation contains an integer that defines the sorting of child objects when the object tree is printed. - // Objects are sorted by their z-order from highest to lowest, and then by their name in alphaebetical order if the + // Objects are sorted by their z-order from highest to lowest, and then by their name in alphabetical order if the // z-order is the same. Objects with no z-order set are assumed to have a default z-order of 0. 
ObjectZOrderAnnotation = "tree.cluster.x-k8s.io.io/z-order" ) diff --git a/cmd/clusterctl/client/tree/discovery.go b/cmd/clusterctl/client/tree/discovery.go index e16b250ac9bc..09feb1960440 100644 --- a/cmd/clusterctl/client/tree/discovery.go +++ b/cmd/clusterctl/client/tree/discovery.go @@ -19,6 +19,7 @@ package tree import ( "context" + "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -83,9 +84,11 @@ func Discovery(ctx context.Context, c client.Client, namespace, name string, opt tree := NewObjectTree(cluster, options.toObjectTreeOptions()) // Adds cluster infra - if clusterInfra, err := external.Get(ctx, c, cluster.Spec.InfrastructureRef, cluster.Namespace); err == nil { - tree.Add(cluster, clusterInfra, ObjectMetaName("ClusterInfrastructure")) + clusterInfra, err := external.Get(ctx, c, cluster.Spec.InfrastructureRef, cluster.Namespace) + if err != nil { + return nil, errors.Wrap(err, "get InfraCluster reference from Cluster") } + tree.Add(cluster, clusterInfra, ObjectMetaName("ClusterInfrastructure")) if options.ShowClusterResourceSets { addClusterResourceSetsToObjectTree(ctx, c, cluster, tree) @@ -108,20 +111,26 @@ func Discovery(ctx context.Context, c client.Client, namespace, name string, opt machineMap[m.Name] = true if visible { - if machineInfra, err := external.Get(ctx, c, &m.Spec.InfrastructureRef, cluster.Namespace); err == nil { - tree.Add(m, machineInfra, ObjectMetaName("MachineInfrastructure"), NoEcho(true)) + if (m.Spec.InfrastructureRef != corev1.ObjectReference{}) { + if machineInfra, err := external.Get(ctx, c, &m.Spec.InfrastructureRef, cluster.Namespace); err == nil { + tree.Add(m, machineInfra, ObjectMetaName("MachineInfrastructure"), NoEcho(true)) + } } - if machineBootstrap, err := external.Get(ctx, c, m.Spec.Bootstrap.ConfigRef, cluster.Namespace); err == nil { - tree.Add(m, machineBootstrap, ObjectMetaName("BootstrapConfig"), NoEcho(true)) + if m.Spec.Bootstrap.ConfigRef != nil { + if machineBootstrap, err := external.Get(ctx, c, m.Spec.Bootstrap.ConfigRef, cluster.Namespace); err == nil { + tree.Add(m, machineBootstrap, ObjectMetaName("BootstrapConfig"), NoEcho(true)) + } } } } controlPlaneMachines := selectControlPlaneMachines(machinesList) - for i := range controlPlaneMachines { - cp := controlPlaneMachines[i] - addMachineFunc(controlPlane, cp) + if controlPlane != nil { + for i := range controlPlaneMachines { + cp := controlPlaneMachines[i] + addMachineFunc(controlPlane, cp) + } } machinePoolList, err := getMachinePoolsInCluster(ctx, c, cluster.Namespace, cluster.Name) @@ -141,25 +150,25 @@ func Discovery(ctx context.Context, c client.Client, namespace, name string, opt if err != nil { return nil, err } - - // Handles orphan machines. - if len(machineMap) < len(machinesList.Items) { - other := VirtualObject(cluster.Namespace, "OtherGroup", "Other") - tree.Add(workers, other) - - for i := range machinesList.Items { - m := &machinesList.Items[i] - if _, ok := machineMap[m.Name]; ok { - continue - } - addMachineFunc(other, m) - } - } } if len(machinePoolList.Items) > 0 { // Add MachinePool objects tree.Add(cluster, workers) - addMachinePoolsToObjectTree(ctx, c, cluster.Namespace, workers, machinePoolList, tree) + addMachinePoolsToObjectTree(ctx, c, cluster.Namespace, workers, machinePoolList, machinesList, tree, addMachineFunc) + } + + // Handles orphan machines. 
+ if len(machineMap) < len(machinesList.Items) { + other := VirtualObject(cluster.Namespace, "OtherGroup", "Other") + tree.Add(workers, other) + + for i := range machinesList.Items { + m := &machinesList.Items[i] + if _, ok := machineMap[m.Name]; ok { + continue + } + addMachineFunc(other, m) + } } return tree, nil @@ -236,8 +245,11 @@ func addMachineDeploymentToObjectTree(ctx context.Context, c client.Client, clus templateParent = md } - bootstrapTemplateRefObject := ObjectReferenceObject(md.Spec.Template.Spec.Bootstrap.ConfigRef) - tree.Add(templateParent, bootstrapTemplateRefObject, ObjectMetaName("BootstrapConfigTemplate")) + // md.Spec.Template.Spec.Bootstrap.ConfigRef is optional + if md.Spec.Template.Spec.Bootstrap.ConfigRef != nil { + bootstrapTemplateRefObject := ObjectReferenceObject(md.Spec.Template.Spec.Bootstrap.ConfigRef) + tree.Add(templateParent, bootstrapTemplateRefObject, ObjectMetaName("BootstrapConfigTemplate")) + } machineTemplateRefObject := ObjectReferenceObject(&md.Spec.Template.Spec.InfrastructureRef) tree.Add(templateParent, machineTemplateRefObject, ObjectMetaName("MachineInfrastructureTemplate")) @@ -263,10 +275,10 @@ func addMachineDeploymentToObjectTree(ctx context.Context, c client.Client, clus return nil } -func addMachinePoolsToObjectTree(ctx context.Context, c client.Client, namespace string, workers *unstructured.Unstructured, machinePoolList *expv1.MachinePoolList, tree *ObjectTree) { +func addMachinePoolsToObjectTree(ctx context.Context, c client.Client, namespace string, workers *unstructured.Unstructured, machinePoolList *expv1.MachinePoolList, machinesList *clusterv1.MachineList, tree *ObjectTree, addMachineFunc func(parent client.Object, m *clusterv1.Machine)) { for i := range machinePoolList.Items { mp := &machinePoolList.Items[i] - _, visible := tree.Add(workers, mp) + _, visible := tree.Add(workers, mp, GroupingObject(true)) if visible { if machinePoolBootstrap, err := external.Get(ctx, c, mp.Spec.Template.Spec.Bootstrap.ConfigRef, namespace); err == nil { @@ -274,9 +286,14 @@ func addMachinePoolsToObjectTree(ctx context.Context, c client.Client, namespace } if machinePoolInfra, err := external.Get(ctx, c, &mp.Spec.Template.Spec.InfrastructureRef, namespace); err == nil { - tree.Add(mp, machinePoolInfra, ObjectMetaName("MachineInfrastructure"), NoEcho(true)) + tree.Add(mp, machinePoolInfra, ObjectMetaName("MachinePoolInfrastructure"), NoEcho(true)) } } + + machines := selectMachinesControlledBy(machinesList, mp) + for _, m := range machines { + addMachineFunc(mp, m) + } } } diff --git a/cmd/clusterctl/client/tree/discovery_test.go b/cmd/clusterctl/client/tree/discovery_test.go index 7509aec38f65..ed8bb4284710 100644 --- a/cmd/clusterctl/client/tree/discovery_test.go +++ b/cmd/clusterctl/client/tree/discovery_test.go @@ -215,6 +215,86 @@ func Test_Discovery(t *testing.T) { }, }, }, + { + name: "Discovery with MachinePool Machines with echo enabled", + args: args{ + discoverOptions: DiscoverOptions{ + Grouping: false, + Echo: true, + }, + objs: test.NewFakeCluster("ns1", "cluster1"). + WithControlPlane( + test.NewFakeControlPlane("cp"). + WithMachines( + test.NewFakeMachine("cp1"), + ), + ). + WithMachinePools( + test.NewFakeMachinePool("mp1"). + WithMachines( + test.NewFakeMachine("mp1m1"), + test.NewFakeMachine("mp1m2"), + ), + ). 
+ Objs(), + }, + wantTree: map[string][]string{ + // Cluster should be parent of InfrastructureCluster, ControlPlane, and WorkerNodes + "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1": { + "infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/cluster1", + "controlplane.cluster.x-k8s.io/v1beta1, Kind=GenericControlPlane, ns1/cp", + "virtual.cluster.x-k8s.io/v1beta1, Kind=WorkerGroup, ns1/Workers", + }, + // InfrastructureCluster should be leaf + "infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/cluster1": {}, + // ControlPlane should have a machine + "controlplane.cluster.x-k8s.io/v1beta1, Kind=GenericControlPlane, ns1/cp": { + "cluster.x-k8s.io/v1beta1, Kind=Machine, ns1/cp1", + }, + // Workers should have a machine deployment + "virtual.cluster.x-k8s.io/v1beta1, Kind=WorkerGroup, ns1/Workers": { + "cluster.x-k8s.io/v1beta1, Kind=MachinePool, ns1/mp1", + }, + // Machine Pool should have a group of machines + "cluster.x-k8s.io/v1beta1, Kind=MachinePool, ns1/mp1": { + "infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachineTemplate, ns1/mp1", + "bootstrap.cluster.x-k8s.io/v1beta1, Kind=GenericBootstrapConfigTemplate, ns1/mp1", + "cluster.x-k8s.io/v1beta1, Kind=Machine, ns1/mp1m1", + "cluster.x-k8s.io/v1beta1, Kind=Machine, ns1/mp1m2", + }, + // Machine should have infra machine and bootstrap (echo) + "cluster.x-k8s.io/v1beta1, Kind=Machine, ns1/cp1": { + "infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachine, ns1/cp1", + "bootstrap.cluster.x-k8s.io/v1beta1, Kind=GenericBootstrapConfig, ns1/cp1", + }, + // MachinePool Machine should only have infra machine + "cluster.x-k8s.io/v1beta1, Kind=Machine, ns1/mp1m1": { + "infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachine, ns1/mp1m1", + }, + "cluster.x-k8s.io/v1beta1, Kind=Machine, ns1/mp1m2": { + "infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachine, ns1/mp1m2", + }, + }, + wantNodeCheck: map[string]nodeCheck{ + // InfrastructureCluster should have a meta name + "infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/cluster1": func(g *WithT, obj client.Object) { + g.Expect(GetMetaName(obj)).To(Equal("ClusterInfrastructure")) + }, + // ControlPlane should have a meta name, should NOT be a grouping object + "controlplane.cluster.x-k8s.io/v1beta1, Kind=GenericControlPlane, ns1/cp": func(g *WithT, obj client.Object) { + g.Expect(GetMetaName(obj)).To(Equal("ControlPlane")) + g.Expect(IsGroupingObject(obj)).To(BeFalse()) + }, + // Workers should be a virtual node + "virtual.cluster.x-k8s.io/v1beta1, Kind=WorkerGroup, ns1/Workers": func(g *WithT, obj client.Object) { + g.Expect(IsVirtualObject(obj)).To(BeTrue()) + }, + // Machine pool should NOT be a grouping object + "cluster.x-k8s.io/v1beta1, Kind=MachinePool, ns1/mp1": func(g *WithT, obj client.Object) { + g.Expect(IsGroupingObject(obj)).To(BeFalse()) + }, + }, + }, { name: "Discovery with grouping and no-echo disabled", args: args{ @@ -397,6 +477,17 @@ func Test_Discovery(t *testing.T) { test.NewFakeInfrastructureTemplate("md1"), ), ). + WithMachineDeployments( + test.NewFakeMachineDeployment("md2"). + WithStaticBootstrapConfig(). + WithMachineSets( + test.NewFakeMachineSet("ms2"). + WithMachines( + test.NewFakeMachine("m3"), + test.NewFakeMachine("m4"), + ), + ), + ). 
Objs(), }, wantTree: map[string][]string{ @@ -418,6 +509,7 @@ func Test_Discovery(t *testing.T) { // Workers should have a machine deployment "virtual.cluster.x-k8s.io/v1beta1, Kind=WorkerGroup, ns1/Workers": { "cluster.x-k8s.io/v1beta1, Kind=MachineDeployment, ns1/md1", + "cluster.x-k8s.io/v1beta1, Kind=MachineDeployment, ns1/md2", }, // Machine deployment should have a group of machines (grouping) and templates group "cluster.x-k8s.io/v1beta1, Kind=MachineDeployment, ns1/md1": { @@ -433,6 +525,17 @@ func Test_Discovery(t *testing.T) { "infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachineTemplate, ns1/md1": {}, // MachineDeployment BootstrapConfigRef should be a leaf "bootstrap.cluster.x-k8s.io/v1beta1, Kind=GenericBootstrapConfigTemplate, ns1/md1": {}, + // Machine deployment should have a group of machines (grouping) and templates group + "cluster.x-k8s.io/v1beta1, Kind=MachineDeployment, ns1/md2": { + "virtual.cluster.x-k8s.io/v1beta1, Kind=MachineGroup, ns1/zzz_", + "virtual.cluster.x-k8s.io/v1beta1, Kind=TemplateGroup, ns1/md2", + }, + // MachineDeployment TemplateGroup using static bootstrap will only have InfrastructureRef + "virtual.cluster.x-k8s.io/v1beta1, Kind=TemplateGroup, ns1/md2": { + "infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachineTemplate, ns1/md2", + }, + // MachineDeployment InfrastructureRef should be a leaf + "infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachineTemplate, ns1/md2": {}, // ControlPlane TemplateGroup should have a InfrastructureRef "virtual.cluster.x-k8s.io/v1beta1, Kind=TemplateGroup, ns1/cp": { "infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachineTemplate, ns1/cp", @@ -738,7 +841,7 @@ func Test_Discovery(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - client, err := test.NewFakeProxy().WithObjs(tt.args.objs...).NewClient() + client, err := test.NewFakeProxy().WithObjs(tt.args.objs...).NewClient(context.Background()) g.Expect(client).ToNot(BeNil()) g.Expect(err).ToNot(HaveOccurred()) diff --git a/cmd/clusterctl/client/tree/tree_test.go b/cmd/clusterctl/client/tree/tree_test.go index d92bb1caf84e..9b3253958d58 100644 --- a/cmd/clusterctl/client/tree/tree_test.go +++ b/cmd/clusterctl/client/tree/tree_test.go @@ -290,7 +290,7 @@ func Test_createGroupNode(t *testing.T) { want.SetName(got.GetName()) want.SetUID(got.GetUID()) - g.Expect(got).To(Equal(want)) + g.Expect(got).To(BeComparableTo(want)) } func Test_updateGroupNode(t *testing.T) { @@ -367,7 +367,7 @@ func Test_updateGroupNode(t *testing.T) { g := NewWithT(t) updateGroupNode(group, GetReadyCondition(group), obj, GetReadyCondition(obj)) - g.Expect(group).To(Equal(want)) + g.Expect(group).To(BeComparableTo(want)) } func Test_Add_setsShowObjectConditionsAnnotation(t *testing.T) { diff --git a/cmd/clusterctl/client/upgrade.go b/cmd/clusterctl/client/upgrade.go index 849ed23cc889..5979c9461b72 100644 --- a/cmd/clusterctl/client/upgrade.go +++ b/cmd/clusterctl/client/upgrade.go @@ -17,14 +17,13 @@ limitations under the License. 
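Before moving into upgrade.go: the discovery.go hunks above stop assuming that m.Spec.InfrastructureRef and m.Spec.Bootstrap.ConfigRef are always set. The empty-struct comparison they use has one syntactic wrinkle worth calling out; a tiny standalone sketch:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	var ref corev1.ObjectReference

	// The parentheses matter: without them the composite literal's braces
	// would be parsed as the start of the if-body.
	if (ref == corev1.ObjectReference{}) {
		fmt.Println("empty reference: skip resolving it")
	}

	ref.Kind = "GenericInfrastructureMachine"
	ref.Name = "mp1m1"
	if (ref != corev1.ObjectReference{}) {
		fmt.Println("populated reference: safe to resolve")
	}
}
```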
package client import ( + "context" "strings" "time" "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" - clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster" @@ -38,7 +37,7 @@ type PlanUpgradeOptions struct { Kubeconfig Kubeconfig } -func (c *clusterctlClient) PlanCertManagerUpgrade(options PlanUpgradeOptions) (CertManagerUpgradePlan, error) { +func (c *clusterctlClient) PlanCertManagerUpgrade(ctx context.Context, options PlanUpgradeOptions) (CertManagerUpgradePlan, error) { // Get the client for interacting with the management cluster. cluster, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: options.Kubeconfig}) if err != nil { @@ -46,33 +45,28 @@ func (c *clusterctlClient) PlanCertManagerUpgrade(options PlanUpgradeOptions) (C } certManager := cluster.CertManager() - plan, err := certManager.PlanUpgrade() + plan, err := certManager.PlanUpgrade(ctx) return CertManagerUpgradePlan(plan), err } -func (c *clusterctlClient) PlanUpgrade(options PlanUpgradeOptions) ([]UpgradePlan, error) { +func (c *clusterctlClient) PlanUpgrade(ctx context.Context, options PlanUpgradeOptions) ([]UpgradePlan, error) { // Get the client for interacting with the management cluster. clusterClient, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: options.Kubeconfig}) if err != nil { return nil, err } - // Ensure this command only runs against management clusters with the current Cluster API contract (default) or the previous one. - // NOTE: given that v1beta1 (current) and v1alpha4 (previous) does not have breaking changes, we support also upgrades from v1alpha3 to v1beta1; - // this is an exception and support for skipping releases should be removed in future releases. - if err := clusterClient.ProviderInventory().CheckCAPIContract( - cluster.AllowCAPIContract{Contract: clusterv1alpha3.GroupVersion.Version}, - cluster.AllowCAPIContract{Contract: clusterv1alpha4.GroupVersion.Version}, - ); err != nil { + // Ensure this command only runs against management clusters with the current Cluster API contract. + if err := clusterClient.ProviderInventory().CheckCAPIContract(ctx); err != nil { return nil, err } // Ensures the custom resource definitions required by clusterctl are in place. - if err := clusterClient.ProviderInventory().EnsureCustomResourceDefinitions(); err != nil { + if err := clusterClient.ProviderInventory().EnsureCustomResourceDefinitions(ctx); err != nil { return nil, err } - upgradePlans, err := clusterClient.ProviderUpgrader().Plan() + upgradePlans, err := clusterClient.ProviderUpgrader().Plan(ctx) if err != nil { return nil, err } @@ -121,6 +115,9 @@ type ApplyUpgradeOptions struct { // RuntimeExtensionProviders instance and versions (e.g. runtime-extension-system/test:v0.0.1) to upgrade to. This field can be used as alternative to Contract. RuntimeExtensionProviders []string + // AddonProviders instance and versions (e.g. caaph-system/helm:v0.1.0) to upgrade to. This field can be used as alternative to Contract. + AddonProviders []string + // WaitProviders instructs the upgrade apply command to wait till the providers are successfully upgraded. 
WaitProviders bool @@ -128,29 +125,30 @@ type ApplyUpgradeOptions struct { WaitProviderTimeout time.Duration } -func (c *clusterctlClient) ApplyUpgrade(options ApplyUpgradeOptions) error { +func (c *clusterctlClient) ApplyUpgrade(ctx context.Context, options ApplyUpgradeOptions) error { if options.Contract != "" && options.Contract != clusterv1.GroupVersion.Version { return errors.Errorf("current version of clusterctl could only upgrade to %s contract, requested %s", clusterv1.GroupVersion.Version, options.Contract) } + // Default WaitProviderTimeout as we cannot rely on defaulting in the CLI + // when clusterctl is used as a library. + if options.WaitProviderTimeout.Nanoseconds() == 0 { + options.WaitProviderTimeout = time.Duration(5*60) * time.Second + } + // Get the client for interacting with the management cluster. clusterClient, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: options.Kubeconfig}) if err != nil { return err } - // Ensure this command only runs against management clusters with the current Cluster API contract (default) or the previous one. - // NOTE: given that v1beta1 (current) and v1alpha4 (previous) does not have breaking changes, we support also upgrades from v1alpha3 to v1beta1; - // this is an exception and support for skipping releases should be removed in future releases. - if err := clusterClient.ProviderInventory().CheckCAPIContract( - cluster.AllowCAPIContract{Contract: clusterv1alpha3.GroupVersion.Version}, - cluster.AllowCAPIContract{Contract: clusterv1alpha4.GroupVersion.Version}, - ); err != nil { + // Ensure this command only runs against management clusters with the current Cluster API contract. + if err := clusterClient.ProviderInventory().CheckCAPIContract(ctx); err != nil { return err } // Ensures the custom resource definitions required by clusterctl are in place. - if err := clusterClient.ProviderInventory().EnsureCustomResourceDefinitions(); err != nil { + if err := clusterClient.ProviderInventory().EnsureCustomResourceDefinitions(ctx); err != nil { return err } @@ -159,7 +157,7 @@ func (c *clusterctlClient) ApplyUpgrade(options ApplyUpgradeOptions) error { // conversion web-hooks around Issuer/Certificate kinds, so installing an older versions of providers // should continue to work with the latest cert-manager. certManager := clusterClient.CertManager() - if err := certManager.EnsureLatestVersion(); err != nil { + if err := certManager.EnsureLatestVersion(ctx); err != nil { return err } @@ -169,7 +167,8 @@ func (c *clusterctlClient) ApplyUpgrade(options ApplyUpgradeOptions) error { len(options.ControlPlaneProviders) > 0 || len(options.InfrastructureProviders) > 0 || len(options.IPAMProviders) > 0 || - len(options.RuntimeExtensionProviders) > 0 + len(options.RuntimeExtensionProviders) > 0 || + len(options.AddonProviders) > 0 opts := cluster.UpgradeOptions{ WaitProviders: options.WaitProviders, @@ -182,43 +181,47 @@ func (c *clusterctlClient) ApplyUpgrade(options ApplyUpgradeOptions) error { upgradeItems := []cluster.UpgradeItem{} if options.CoreProvider != "" { - upgradeItems, err = addUpgradeItems(clusterClient, upgradeItems, clusterctlv1.CoreProviderType, options.CoreProvider) + upgradeItems, err = addUpgradeItems(ctx, clusterClient, upgradeItems, clusterctlv1.CoreProviderType, options.CoreProvider) if err != nil { return err } } - upgradeItems, err = addUpgradeItems(clusterClient, upgradeItems, clusterctlv1.BootstrapProviderType, options.BootstrapProviders...) 
+ upgradeItems, err = addUpgradeItems(ctx, clusterClient, upgradeItems, clusterctlv1.BootstrapProviderType, options.BootstrapProviders...) + if err != nil { + return err + } + upgradeItems, err = addUpgradeItems(ctx, clusterClient, upgradeItems, clusterctlv1.ControlPlaneProviderType, options.ControlPlaneProviders...) if err != nil { return err } - upgradeItems, err = addUpgradeItems(clusterClient, upgradeItems, clusterctlv1.ControlPlaneProviderType, options.ControlPlaneProviders...) + upgradeItems, err = addUpgradeItems(ctx, clusterClient, upgradeItems, clusterctlv1.InfrastructureProviderType, options.InfrastructureProviders...) if err != nil { return err } - upgradeItems, err = addUpgradeItems(clusterClient, upgradeItems, clusterctlv1.InfrastructureProviderType, options.InfrastructureProviders...) + upgradeItems, err = addUpgradeItems(ctx, clusterClient, upgradeItems, clusterctlv1.IPAMProviderType, options.IPAMProviders...) if err != nil { return err } - upgradeItems, err = addUpgradeItems(clusterClient, upgradeItems, clusterctlv1.IPAMProviderType, options.IPAMProviders...) + upgradeItems, err = addUpgradeItems(ctx, clusterClient, upgradeItems, clusterctlv1.RuntimeExtensionProviderType, options.RuntimeExtensionProviders...) if err != nil { return err } - upgradeItems, err = addUpgradeItems(clusterClient, upgradeItems, clusterctlv1.RuntimeExtensionProviderType, options.RuntimeExtensionProviders...) + upgradeItems, err = addUpgradeItems(ctx, clusterClient, upgradeItems, clusterctlv1.AddonProviderType, options.AddonProviders...) if err != nil { return err } // Execute the upgrade using the custom upgrade items - return clusterClient.ProviderUpgrader().ApplyCustomPlan(opts, upgradeItems...) + return clusterClient.ProviderUpgrader().ApplyCustomPlan(ctx, opts, upgradeItems...) } // Otherwise we are upgrading a whole management cluster according to a clusterctl generated upgrade plan. 
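Putting the ApplyUpgrade changes together, a plausible library call now looks like the sketch below. The kubeconfig path is illustrative, and passing an empty config path to client.New is assumed to fall back to the default config location.

```go
package main

import (
	"context"
	"log"

	"sigs.k8s.io/cluster-api/cmd/clusterctl/client"
)

func main() {
	ctx := context.Background()

	c, err := client.New(ctx, "")
	if err != nil {
		log.Fatal(err)
	}

	// WaitProviderTimeout is left zero on purpose: the patch above defaults
	// it to 5 minutes so library callers get a sane wait without CLI flags.
	if err := c.ApplyUpgrade(ctx, client.ApplyUpgradeOptions{
		Kubeconfig:    client.Kubeconfig{Path: "cluster1"}, // illustrative
		Contract:      "v1beta1",
		WaitProviders: true,
	}); err != nil {
		log.Fatal(err)
	}
}
```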
- return clusterClient.ProviderUpgrader().ApplyPlan(opts, options.Contract) + return clusterClient.ProviderUpgrader().ApplyPlan(ctx, opts, options.Contract) } -func addUpgradeItems(clusterClient cluster.Client, upgradeItems []cluster.UpgradeItem, providerType clusterctlv1.ProviderType, providers ...string) ([]cluster.UpgradeItem, error) { +func addUpgradeItems(ctx context.Context, clusterClient cluster.Client, upgradeItems []cluster.UpgradeItem, providerType clusterctlv1.ProviderType, providers ...string) ([]cluster.UpgradeItem, error) { for _, upgradeReference := range providers { - providerUpgradeItem, err := parseUpgradeItem(clusterClient, upgradeReference, providerType) + providerUpgradeItem, err := parseUpgradeItem(ctx, clusterClient, upgradeReference, providerType) if err != nil { return nil, err } @@ -230,7 +233,7 @@ func addUpgradeItems(clusterClient cluster.Client, upgradeItems []cluster.Upgrad return upgradeItems, nil } -func parseUpgradeItem(clusterClient cluster.Client, ref string, providerType clusterctlv1.ProviderType) (*cluster.UpgradeItem, error) { +func parseUpgradeItem(ctx context.Context, clusterClient cluster.Client, ref string, providerType clusterctlv1.ProviderType) (*cluster.UpgradeItem, error) { // TODO(oscr) Remove when explicit namespaces for providers is removed // ref format is old format: namespace/provider:version if strings.Contains(ref, "/") { @@ -238,7 +241,7 @@ func parseUpgradeItem(clusterClient cluster.Client, ref string, providerType clu } // ref format is: provider:version - return parseUpgradeItemWithoutNamespace(clusterClient, ref, providerType) + return parseUpgradeItemWithoutNamespace(ctx, clusterClient, ref, providerType) } func parseUpgradeItemWithNamespace(ref string, providerType clusterctlv1.ProviderType) (*cluster.UpgradeItem, error) { @@ -274,7 +277,7 @@ func parseUpgradeItemWithNamespace(ref string, providerType clusterctlv1.Provide }, nil } -func parseUpgradeItemWithoutNamespace(clusterClient cluster.Client, ref string, providerType clusterctlv1.ProviderType) (*cluster.UpgradeItem, error) { +func parseUpgradeItemWithoutNamespace(ctx context.Context, clusterClient cluster.Client, ref string, providerType clusterctlv1.ProviderType) (*cluster.UpgradeItem, error) { if !strings.Contains(ref, ":") { return nil, errors.Errorf(upgradeItemProviderNameError, ref) } @@ -284,7 +287,7 @@ func parseUpgradeItemWithoutNamespace(clusterClient cluster.Client, ref string, return nil, errors.Wrapf(err, upgradeItemProviderNameError, ref) } - namespace, err := clusterClient.ProviderInventory().GetProviderNamespace(name, providerType) + namespace, err := clusterClient.ProviderInventory().GetProviderNamespace(ctx, name, providerType) if err != nil { return nil, errors.Errorf("unable to find default namespace for provider %q", ref) } diff --git a/cmd/clusterctl/client/upgrade_test.go b/cmd/clusterctl/client/upgrade_test.go index 47abe4807a46..c5512807bbfe 100644 --- a/cmd/clusterctl/client/upgrade_test.go +++ b/cmd/clusterctl/client/upgrade_test.go @@ -17,6 +17,7 @@ limitations under the License. package client import ( + "context" "sort" "testing" @@ -35,13 +36,15 @@ func Test_clusterctlClient_PlanCertUpgrade(t *testing.T) { // create a fake config with a provider named P1 and a variable named var repository1Config := config.NewProvider("p1", "url", clusterctlv1.CoreProviderType) - config1 := newFakeConfig(). + ctx := context.Background() + + config1 := newFakeConfig(ctx). WithVar("var", "value"). 
WithProvider(repository1Config) // create a fake repository with some YAML files in it (usually matching // the list of providers defined in the config) - repository1 := newFakeRepository(repository1Config, config1). + repository1 := newFakeRepository(ctx, repository1Config, config1). WithPaths("root", "components"). WithDefaultVersion("v1.0"). WithFile("v1.0", "components.yaml", []byte("content")) @@ -56,7 +59,7 @@ func Test_clusterctlClient_PlanCertUpgrade(t *testing.T) { cluster1 := newFakeCluster(cluster.Kubeconfig{Path: "cluster1"}, config1). WithCertManagerClient(newFakeCertManagerClient(nil, nil).WithCertManagerPlan(certManagerPlan)) - client := newFakeClient(config1). + client := newFakeClient(ctx, config1). WithRepository(repository1). WithCluster(cluster1) @@ -75,17 +78,20 @@ func Test_clusterctlClient_PlanCertUpgrade(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + + ctx := context.Background() + options := PlanUpgradeOptions{ Kubeconfig: Kubeconfig{Path: "cluster1"}, } - actualPlan, err := tt.client.PlanCertManagerUpgrade(options) + actualPlan, err := tt.client.PlanCertManagerUpgrade(ctx, options) if tt.expectErr { g.Expect(err).To(HaveOccurred()) - g.Expect(actualPlan).To(Equal(CertManagerUpgradePlan{})) + g.Expect(actualPlan).To(BeComparableTo(CertManagerUpgradePlan{})) return } g.Expect(err).ToNot(HaveOccurred()) - g.Expect(actualPlan).To(Equal(certManagerPlan)) + g.Expect(actualPlan).To(BeComparableTo(certManagerPlan)) }) } } @@ -133,12 +139,14 @@ func Test_clusterctlClient_PlanUpgrade(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - _, err := tt.fields.client.PlanUpgrade(tt.args.options) + ctx := context.Background() + + _, err := tt.fields.client.PlanUpgrade(ctx, tt.args.options) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) }) } } @@ -173,10 +181,6 @@ func Test_clusterctlClient_ApplyUpgrade(t *testing.T) { }, }, wantProviders: &clusterctlv1.ProviderList{ - TypeMeta: metav1.TypeMeta{ - APIVersion: clusterctlv1.GroupVersion.String(), - Kind: "ProviderList", - }, ListMeta: metav1.ListMeta{}, Items: []clusterctlv1.Provider{ // both providers should be upgraded fakeProvider("cluster-api", clusterctlv1.CoreProviderType, "v1.0.1", "cluster-api-system"), @@ -201,10 +205,6 @@ func Test_clusterctlClient_ApplyUpgrade(t *testing.T) { }, }, wantProviders: &clusterctlv1.ProviderList{ - TypeMeta: metav1.TypeMeta{ - APIVersion: clusterctlv1.GroupVersion.String(), - Kind: "ProviderList", - }, ListMeta: metav1.ListMeta{}, Items: []clusterctlv1.Provider{ // only one provider should be upgraded fakeProvider("cluster-api", clusterctlv1.CoreProviderType, "v1.0.1", "cluster-api-system"), @@ -229,10 +229,6 @@ func Test_clusterctlClient_ApplyUpgrade(t *testing.T) { }, }, wantProviders: &clusterctlv1.ProviderList{ - TypeMeta: metav1.TypeMeta{ - APIVersion: clusterctlv1.GroupVersion.String(), - Kind: "ProviderList", - }, ListMeta: metav1.ListMeta{}, Items: []clusterctlv1.Provider{ // only one provider should be upgraded fakeProvider("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system"), @@ -257,10 +253,6 @@ func Test_clusterctlClient_ApplyUpgrade(t *testing.T) { }, }, wantProviders: &clusterctlv1.ProviderList{ - TypeMeta: metav1.TypeMeta{ - APIVersion: clusterctlv1.GroupVersion.String(), - Kind: "ProviderList", - }, ListMeta: metav1.ListMeta{}, Items: []clusterctlv1.Provider{ fakeProvider("cluster-api", 
clusterctlv1.CoreProviderType, "v1.0.1", "cluster-api-system"), @@ -274,20 +266,22 @@ func Test_clusterctlClient_ApplyUpgrade(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - err := tt.fields.client.ApplyUpgrade(tt.args.options) + ctx := context.Background() + + err := tt.fields.client.ApplyUpgrade(ctx, tt.args.options) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) // converting between client and cluster alias for Kubeconfig input := cluster.Kubeconfig(tt.args.options.Kubeconfig) proxy := tt.fields.client.clusters[input].Proxy() gotProviders := &clusterctlv1.ProviderList{} - c, err := proxy.NewClient() - g.Expect(err).NotTo(HaveOccurred()) + c, err := proxy.NewClient(ctx) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(c.List(ctx, gotProviders)).To(Succeed()) @@ -300,7 +294,7 @@ func Test_clusterctlClient_ApplyUpgrade(t *testing.T) { for i := range gotProviders.Items { tt.wantProviders.Items[i].ResourceVersion = gotProviders.Items[i].ResourceVersion } - g.Expect(gotProviders).To(Equal(tt.wantProviders), cmp.Diff(gotProviders, tt.wantProviders)) + g.Expect(gotProviders).To(BeComparableTo(tt.wantProviders), cmp.Diff(gotProviders, tt.wantProviders)) }) } } @@ -309,11 +303,13 @@ func fakeClientForUpgrade() *fakeClient { core := config.NewProvider("cluster-api", "https://somewhere.com", clusterctlv1.CoreProviderType) infra := config.NewProvider("infra", "https://somewhere.com", clusterctlv1.InfrastructureProviderType) - config1 := newFakeConfig(). + ctx := context.Background() + + config1 := newFakeConfig(ctx). WithProvider(core). WithProvider(infra) - repository1 := newFakeRepository(core, config1). + repository1 := newFakeRepository(ctx, core, config1). WithPaths("root", "components.yaml"). WithDefaultVersion("v1.0.1"). WithFile("v1.0.1", "components.yaml", componentsYAML("ns2")). @@ -323,7 +319,7 @@ func fakeClientForUpgrade() *fakeClient { {Major: 1, Minor: 0, Contract: test.CurrentCAPIContract}, }, }) - repository2 := newFakeRepository(infra, config1). + repository2 := newFakeRepository(ctx, infra, config1). WithPaths("root", "components.yaml"). WithDefaultVersion("v2.0.0"). WithFile("v2.0.1", "components.yaml", componentsYAML("ns2")). @@ -341,7 +337,7 @@ func fakeClientForUpgrade() *fakeClient { WithProviderInventory(infra.Name(), infra.Type(), "v2.0.0", "infra-system"). WithObjs(test.FakeCAPISetupObjects()...) - client := newFakeClient(config1). + client := newFakeClient(ctx, config1). WithRepository(repository1). WithRepository(repository2). 
WithCluster(cluster1) @@ -376,7 +372,9 @@ func Test_parseUpgradeItem(t *testing.T) { provider string } - configClient := newFakeConfig() + ctx := context.Background() + + configClient := newFakeConfig(ctx) clusterClient := newFakeCluster(cluster.Kubeconfig{Path: "cluster1"}, configClient) clusterClient.WithProviderInventory("best-provider", clusterctlv1.CoreProviderType, "v1.0.0", "best-provider-system") @@ -469,14 +467,16 @@ func Test_parseUpgradeItem(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - got, err := parseUpgradeItem(clusterClient, tt.args.provider, clusterctlv1.CoreProviderType) + ctx := context.Background() + + got, err := parseUpgradeItem(ctx, clusterClient, tt.args.provider, clusterctlv1.CoreProviderType) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) - g.Expect(got).To(Equal(tt.want)) + g.Expect(got).To(BeComparableTo(tt.want)) }) } } diff --git a/cmd/clusterctl/client/yamlprocessor/simple_processor_test.go b/cmd/clusterctl/client/yamlprocessor/simple_processor_test.go index 6f4c29bc367b..eb21db233821 100644 --- a/cmd/clusterctl/client/yamlprocessor/simple_processor_test.go +++ b/cmd/clusterctl/client/yamlprocessor/simple_processor_test.go @@ -271,7 +271,7 @@ func TestSimpleProcessor_Process(t *testing.T) { g.Expect(got).To(Equal(tt.args.yaml)) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(Equal(tt.want)) }) diff --git a/cmd/clusterctl/cmd/completion.go b/cmd/clusterctl/cmd/completion.go index 3501781d104d..89ad896dd642 100644 --- a/cmd/clusterctl/cmd/completion.go +++ b/cmd/clusterctl/cmd/completion.go @@ -18,6 +18,7 @@ package cmd import ( "bytes" + "context" "fmt" "io" "os" @@ -48,7 +49,7 @@ const completionBoilerPlate = `# Copyright 2021 The Kubernetes Authors. var ( completionLong = LongDesc(` - Output shell completion code for the specified shell (bash or zsh). + Output shell completion code for the specified shell (bash, zsh or fish). The shell code must be evaluated to provide interactive completion of clusterctl commands. 
This can be done by sourcing it from the .bash_profile.`) @@ -76,15 +77,19 @@ var ( # To load completions for each session, execute once: clusterctl completion zsh > "${fpath[1]}/_clusterctl" + Fish: + # To load completions in your current shell, execute the following command: + clusterctl completion fish | source + # You will need to start a new shell for this setup to take effect.`) completionCmd = &cobra.Command{ - Use: "completion [bash|zsh]", + Use: "completion [bash|zsh|fish]", GroupID: groupOther, - Short: "Output shell completion code for the specified shell (bash or zsh)", + Short: "Output shell completion code for the specified shell (bash, zsh or fish)", Long: LongDesc(completionLong), Example: completionExample, - Args: func(cmd *cobra.Command, args []string) error { + Args: func(_ *cobra.Command, args []string) error { if len(args) != 1 { return errors.New("please specify a shell") } @@ -99,6 +104,7 @@ var ( completionShells = map[string]func(out io.Writer, cmd *cobra.Command) error{ "bash": runCompletionBash, "zsh": runCompletionZsh, + "fish": runCompletionFish, } ) @@ -130,6 +136,12 @@ func runCompletionBash(out io.Writer, cmd *cobra.Command) error { return cmd.Root().GenBashCompletion(out) } +func runCompletionFish(out io.Writer, cmd *cobra.Command) error { + fmt.Fprintf(out, "%s\n", completionBoilerPlate) + + return cmd.Root().GenFishCompletion(out, true) +} + func runCompletionZsh(out io.Writer, cmd *cobra.Command) error { var b bytes.Buffer @@ -152,8 +164,8 @@ func runCompletionZsh(out io.Writer, cmd *cobra.Command) error { } func contextCompletionFunc(kubeconfigFlag *pflag.Flag) func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - configClient, err := config.New(cfgFile) + return func(_ *cobra.Command, _ []string, toComplete string) ([]string, cobra.ShellCompDirective) { + configClient, err := config.New(context.Background(), cfgFile) if err != nil { return completionError(err) } @@ -169,8 +181,10 @@ func contextCompletionFunc(kubeconfigFlag *pflag.Flag) func(cmd *cobra.Command, } func resourceNameCompletionFunc(kubeconfigFlag, contextFlag, namespaceFlag *pflag.Flag, groupVersion, kind string) func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - configClient, err := config.New(cfgFile) + return func(_ *cobra.Command, _ []string, toComplete string) ([]string, cobra.ShellCompDirective) { + ctx := context.Background() + + configClient, err := config.New(ctx, cfgFile) if err != nil { return completionError(err) } @@ -189,7 +203,7 @@ func resourceNameCompletionFunc(kubeconfigFlag, contextFlag, namespaceFlag *pfla } } - comps, err := clusterClient.Proxy().GetResourceNames(groupVersion, kind, []client.ListOption{client.InNamespace(namespace)}, toComplete) + comps, err := clusterClient.Proxy().GetResourceNames(ctx, groupVersion, kind, []client.ListOption{client.InNamespace(namespace)}, toComplete) if err != nil { return completionError(err) } diff --git a/cmd/clusterctl/cmd/config_repositories.go b/cmd/clusterctl/cmd/config_repositories.go index 48e189d18771..1351e118cfb8 100644 --- a/cmd/clusterctl/cmd/config_repositories.go +++ b/cmd/clusterctl/cmd/config_repositories.go @@ -17,6 +17,7 @@ limitations under the License. 
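The completion plumbing above follows cobra's ValidArgsFunction contract: return the candidate strings plus a ShellCompDirective. A toy, self-contained version of the same prefix-filtering pattern; the command name and shell list mirror the diff, everything else is illustrative.

```go
package main

import (
	"strings"

	"github.com/spf13/cobra"
)

func main() {
	shells := []string{"bash", "zsh", "fish"}

	cmd := &cobra.Command{
		Use: "completion [bash|zsh|fish]",
		// Offer only shell names that match the typed prefix and suppress
		// cobra's default file-name completion.
		ValidArgsFunction: func(_ *cobra.Command, _ []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			comps := []string{}
			for _, s := range shells {
				if strings.HasPrefix(s, toComplete) {
					comps = append(comps, s)
				}
			}
			return comps, cobra.ShellCompDirectiveNoFileComp
		},
		RunE: func(*cobra.Command, []string) error { return nil },
	}

	_ = cmd.Execute()
}
```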
package cmd import ( + "context" "fmt" "io" "os" @@ -56,7 +57,7 @@ var configRepositoryCmd = &cobra.Command{ Display the list of providers and their repository configurations. clusterctl ships with a list of known providers; if necessary, edit - $HOME/.cluster-api/clusterctl.yaml file to add a new provider or to customize existing ones.`), + $XDG_CONFIG_HOME/cluster-api/clusterctl.yaml file to add a new provider or to customize existing ones.`), Example: Examples(` # Displays the list of available providers. @@ -65,7 +66,7 @@ var configRepositoryCmd = &cobra.Command{ # Print the list of available providers in yaml format. clusterctl config repositories -o yaml`), - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(*cobra.Command, []string) error { return runGetRepositories(cfgFile, os.Stdout) }, } @@ -85,7 +86,9 @@ func runGetRepositories(cfgFile string, out io.Writer) error { return errors.New("unable to print to nil output writer") } - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } diff --git a/cmd/clusterctl/cmd/config_repositories_test.go b/cmd/clusterctl/cmd/config_repositories_test.go index 079737bad555..5b0f9a74a3b4 100644 --- a/cmd/clusterctl/cmd/config_repositories_test.go +++ b/cmd/clusterctl/cmd/config_repositories_test.go @@ -23,6 +23,7 @@ import ( "path/filepath" "testing" + "github.com/google/go-cmp/cmp" . "github.com/onsi/gomega" ) @@ -31,7 +32,7 @@ func Test_runGetRepositories(t *testing.T) { g := NewWithT(t) tmpDir, err := os.MkdirTemp("", "cc") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) defer os.RemoveAll(tmpDir) path := filepath.Join(tmpDir, "clusterctl.yaml") @@ -45,11 +46,13 @@ func Test_runGetRepositories(t *testing.T) { out, err := io.ReadAll(buf) g.Expect(err).ToNot(HaveOccurred()) + var diff string if val == RepositoriesOutputText { - g.Expect(string(out)).To(Equal(expectedOutputText)) + diff = cmp.Diff(expectedOutputText, string(out)) } else if val == RepositoriesOutputYaml { - g.Expect(string(out)).To(Equal(expectedOutputYaml)) + diff = cmp.Diff(expectedOutputYaml, string(out)) } + g.Expect(diff).To(BeEmpty()) // Use diff to compare as Gomega output does not actually print the string values on failure } }) @@ -68,7 +71,7 @@ func Test_runGetRepositories(t *testing.T) { g := NewWithT(t) tmpDir, err := os.MkdirTemp("", "cc") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) defer os.RemoveAll(tmpDir) path := filepath.Join(tmpDir, "clusterctl.yaml") @@ -83,7 +86,7 @@ var template = `--- providers: # add a custom provider - name: "my-infra-provider" - url: "/home/.cluster-api/overrides/infrastructure-docker/latest/infrastructure-components.yaml" + url: "/home/.config/cluster-api/overrides/infrastructure-docker/latest/infrastructure-components.yaml" type: "InfrastructureProvider" # add a custom provider - name: "another-provider" @@ -99,44 +102,59 @@ providers: type: "CoreProvider" ` -var expectedOutputText = `NAME TYPE URL FILE -cluster-api CoreProvider https://github.com/myorg/myforkofclusterapi/releases/latest/ core_components.yaml -another-provider BootstrapProvider ./ bootstrap-components.yaml -kubeadm BootstrapProvider https://github.com/kubernetes-sigs/cluster-api/releases/latest/ bootstrap-components.yaml -kubekey-k3s BootstrapProvider https://github.com/kubesphere/kubekey/releases/latest/ bootstrap-components.yaml -microk8s BootstrapProvider 
https://github.com/canonical/cluster-api-bootstrap-provider-microk8s/releases/latest/ bootstrap-components.yaml -talos BootstrapProvider https://github.com/siderolabs/cluster-api-bootstrap-provider-talos/releases/latest/ bootstrap-components.yaml -kubeadm ControlPlaneProvider https://github.com/kubernetes-sigs/cluster-api/releases/latest/ control-plane-components.yaml -kubekey-k3s ControlPlaneProvider https://github.com/kubesphere/kubekey/releases/latest/ control-plane-components.yaml -microk8s ControlPlaneProvider https://github.com/canonical/cluster-api-control-plane-provider-microk8s/releases/latest/ control-plane-components.yaml -nested ControlPlaneProvider https://github.com/kubernetes-sigs/cluster-api-provider-nested/releases/latest/ control-plane-components.yaml -talos ControlPlaneProvider https://github.com/siderolabs/cluster-api-control-plane-provider-talos/releases/latest/ control-plane-components.yaml -aws InfrastructureProvider my-aws-infrastructure-components.yaml -azure InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-azure/releases/latest/ infrastructure-components.yaml -byoh InfrastructureProvider https://github.com/vmware-tanzu/cluster-api-provider-bringyourownhost/releases/latest/ infrastructure-components.yaml -cloudstack InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-cloudstack/releases/latest/ infrastructure-components.yaml -coxedge InfrastructureProvider https://github.com/coxedge/cluster-api-provider-coxedge/releases/latest/ infrastructure-components.yaml -digitalocean InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-digitalocean/releases/latest/ infrastructure-components.yaml -docker InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api/releases/latest/ infrastructure-components-development.yaml -gcp InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-gcp/releases/latest/ infrastructure-components.yaml -hetzner InfrastructureProvider https://github.com/syself/cluster-api-provider-hetzner/releases/latest/ infrastructure-components.yaml -ibmcloud InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/releases/latest/ infrastructure-components.yaml -kubekey InfrastructureProvider https://github.com/kubesphere/kubekey/releases/latest/ infrastructure-components.yaml -kubevirt InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-kubevirt/releases/latest/ infrastructure-components.yaml -maas InfrastructureProvider https://github.com/spectrocloud/cluster-api-provider-maas/releases/latest/ infrastructure-components.yaml -metal3 InfrastructureProvider https://github.com/metal3-io/cluster-api-provider-metal3/releases/latest/ infrastructure-components.yaml -my-infra-provider InfrastructureProvider /home/.cluster-api/overrides/infrastructure-docker/latest/ infrastructure-components.yaml -nested InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-nested/releases/latest/ infrastructure-components.yaml -nutanix InfrastructureProvider https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/releases/latest/ infrastructure-components.yaml -oci InfrastructureProvider https://github.com/oracle/cluster-api-provider-oci/releases/latest/ infrastructure-components.yaml -openstack InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-openstack/releases/latest/ infrastructure-components.yaml -outscale InfrastructureProvider 
https://github.com/outscale/cluster-api-provider-outscale/releases/latest/ infrastructure-components.yaml -packet InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-packet/releases/latest/ infrastructure-components.yaml -sidero InfrastructureProvider https://github.com/siderolabs/sidero/releases/latest/ infrastructure-components.yaml -vcd InfrastructureProvider https://github.com/vmware/cluster-api-provider-cloud-director/releases/latest/ infrastructure-components.yaml -vcluster InfrastructureProvider https://github.com/loft-sh/cluster-api-provider-vcluster/releases/latest/ infrastructure-components.yaml -virtink InfrastructureProvider https://github.com/smartxworks/cluster-api-provider-virtink/releases/latest/ infrastructure-components.yaml -vsphere InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/latest/ infrastructure-components.yaml +var expectedOutputText = `NAME TYPE URL FILE +cluster-api CoreProvider https://github.com/myorg/myforkofclusterapi/releases/latest/ core_components.yaml +another-provider BootstrapProvider ./ bootstrap-components.yaml +k0sproject-k0smotron BootstrapProvider https://github.com/k0sproject/k0smotron/releases/latest/ bootstrap-components.yaml +kubeadm BootstrapProvider https://github.com/kubernetes-sigs/cluster-api/releases/latest/ bootstrap-components.yaml +kubekey-k3s BootstrapProvider https://github.com/kubesphere/kubekey/releases/latest/ bootstrap-components.yaml +microk8s BootstrapProvider https://github.com/canonical/cluster-api-bootstrap-provider-microk8s/releases/latest/ bootstrap-components.yaml +ocne BootstrapProvider https://github.com/verrazzano/cluster-api-provider-ocne/releases/latest/ bootstrap-components.yaml +rke2 BootstrapProvider https://github.com/rancher-sandbox/cluster-api-provider-rke2/releases/latest/ bootstrap-components.yaml +talos BootstrapProvider https://github.com/siderolabs/cluster-api-bootstrap-provider-talos/releases/latest/ bootstrap-components.yaml +k0sproject-k0smotron ControlPlaneProvider https://github.com/k0sproject/k0smotron/releases/latest/ control-plane-components.yaml +kamaji ControlPlaneProvider https://github.com/clastix/cluster-api-control-plane-provider-kamaji/releases/latest/ control-plane-components.yaml +kubeadm ControlPlaneProvider https://github.com/kubernetes-sigs/cluster-api/releases/latest/ control-plane-components.yaml +kubekey-k3s ControlPlaneProvider https://github.com/kubesphere/kubekey/releases/latest/ control-plane-components.yaml +microk8s ControlPlaneProvider https://github.com/canonical/cluster-api-control-plane-provider-microk8s/releases/latest/ control-plane-components.yaml +nested ControlPlaneProvider https://github.com/kubernetes-sigs/cluster-api-provider-nested/releases/latest/ control-plane-components.yaml +ocne ControlPlaneProvider https://github.com/verrazzano/cluster-api-provider-ocne/releases/latest/ control-plane-components.yaml +rke2 ControlPlaneProvider https://github.com/rancher-sandbox/cluster-api-provider-rke2/releases/latest/ control-plane-components.yaml +talos ControlPlaneProvider https://github.com/siderolabs/cluster-api-control-plane-provider-talos/releases/latest/ control-plane-components.yaml +aws InfrastructureProvider my-aws-infrastructure-components.yaml +azure InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-azure/releases/latest/ infrastructure-components.yaml +byoh InfrastructureProvider 
https://github.com/vmware-tanzu/cluster-api-provider-bringyourownhost/releases/latest/ infrastructure-components.yaml +cloudstack InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-cloudstack/releases/latest/ infrastructure-components.yaml +coxedge InfrastructureProvider https://github.com/coxedge/cluster-api-provider-coxedge/releases/latest/ infrastructure-components.yaml +digitalocean InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-digitalocean/releases/latest/ infrastructure-components.yaml +docker InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api/releases/latest/ infrastructure-components-development.yaml +gcp InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-gcp/releases/latest/ infrastructure-components.yaml +hetzner InfrastructureProvider https://github.com/syself/cluster-api-provider-hetzner/releases/latest/ infrastructure-components.yaml +hivelocity-hivelocity InfrastructureProvider https://github.com/hivelocity/cluster-api-provider-hivelocity/releases/latest/ infrastructure-components.yaml +ibmcloud InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/releases/latest/ infrastructure-components.yaml +in-memory InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api/releases/latest/ infrastructure-components-in-memory-development.yaml +k0sproject-k0smotron InfrastructureProvider https://github.com/k0sproject/k0smotron/releases/latest/ infrastructure-components.yaml +kubekey InfrastructureProvider https://github.com/kubesphere/kubekey/releases/latest/ infrastructure-components.yaml +kubevirt InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-kubevirt/releases/latest/ infrastructure-components.yaml +linode-linode InfrastructureProvider https://github.com/linode/cluster-api-provider-linode/releases/latest/ infrastructure-components.yaml +maas InfrastructureProvider https://github.com/spectrocloud/cluster-api-provider-maas/releases/latest/ infrastructure-components.yaml +metal3 InfrastructureProvider https://github.com/metal3-io/cluster-api-provider-metal3/releases/latest/ infrastructure-components.yaml +my-infra-provider InfrastructureProvider /home/.config/cluster-api/overrides/infrastructure-docker/latest/ infrastructure-components.yaml +nested InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-nested/releases/latest/ infrastructure-components.yaml +nutanix InfrastructureProvider https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/releases/latest/ infrastructure-components.yaml +oci InfrastructureProvider https://github.com/oracle/cluster-api-provider-oci/releases/latest/ infrastructure-components.yaml +openstack InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-openstack/releases/latest/ infrastructure-components.yaml +outscale InfrastructureProvider https://github.com/outscale/cluster-api-provider-outscale/releases/latest/ infrastructure-components.yaml +packet InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-packet/releases/latest/ infrastructure-components.yaml +proxmox InfrastructureProvider https://github.com/ionos-cloud/cluster-api-provider-proxmox/releases/latest/ infrastructure-components.yaml +sidero InfrastructureProvider https://github.com/siderolabs/sidero/releases/latest/ infrastructure-components.yaml +tinkerbell-tinkerbell InfrastructureProvider 
https://github.com/tinkerbell/cluster-api-provider-tinkerbell/releases/latest/ infrastructure-components.yaml +vcd InfrastructureProvider https://github.com/vmware/cluster-api-provider-cloud-director/releases/latest/ infrastructure-components.yaml +vcluster InfrastructureProvider https://github.com/loft-sh/cluster-api-provider-vcluster/releases/latest/ infrastructure-components.yaml +virtink InfrastructureProvider https://github.com/smartxworks/cluster-api-provider-virtink/releases/latest/ infrastructure-components.yaml +vsphere InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/latest/ infrastructure-components.yaml +in-cluster IPAMProvider https://github.com/kubernetes-sigs/cluster-api-ipam-provider-in-cluster/releases/latest/ ipam-components.yaml +helm AddonProvider https://github.com/kubernetes-sigs/cluster-api-addon-provider-helm/releases/latest/ addon-components.yaml ` var expectedOutputYaml = `- File: core_components.yaml @@ -147,6 +165,10 @@ var expectedOutputYaml = `- File: core_components.yaml Name: another-provider ProviderType: BootstrapProvider URL: ./ +- File: bootstrap-components.yaml + Name: k0sproject-k0smotron + ProviderType: BootstrapProvider + URL: https://github.com/k0sproject/k0smotron/releases/latest/ - File: bootstrap-components.yaml Name: kubeadm ProviderType: BootstrapProvider @@ -159,10 +181,26 @@ var expectedOutputYaml = `- File: core_components.yaml Name: microk8s ProviderType: BootstrapProvider URL: https://github.com/canonical/cluster-api-bootstrap-provider-microk8s/releases/latest/ +- File: bootstrap-components.yaml + Name: ocne + ProviderType: BootstrapProvider + URL: https://github.com/verrazzano/cluster-api-provider-ocne/releases/latest/ +- File: bootstrap-components.yaml + Name: rke2 + ProviderType: BootstrapProvider + URL: https://github.com/rancher-sandbox/cluster-api-provider-rke2/releases/latest/ - File: bootstrap-components.yaml Name: talos ProviderType: BootstrapProvider URL: https://github.com/siderolabs/cluster-api-bootstrap-provider-talos/releases/latest/ +- File: control-plane-components.yaml + Name: k0sproject-k0smotron + ProviderType: ControlPlaneProvider + URL: https://github.com/k0sproject/k0smotron/releases/latest/ +- File: control-plane-components.yaml + Name: kamaji + ProviderType: ControlPlaneProvider + URL: https://github.com/clastix/cluster-api-control-plane-provider-kamaji/releases/latest/ - File: control-plane-components.yaml Name: kubeadm ProviderType: ControlPlaneProvider @@ -179,6 +217,14 @@ var expectedOutputYaml = `- File: core_components.yaml Name: nested ProviderType: ControlPlaneProvider URL: https://github.com/kubernetes-sigs/cluster-api-provider-nested/releases/latest/ +- File: control-plane-components.yaml + Name: ocne + ProviderType: ControlPlaneProvider + URL: https://github.com/verrazzano/cluster-api-provider-ocne/releases/latest/ +- File: control-plane-components.yaml + Name: rke2 + ProviderType: ControlPlaneProvider + URL: https://github.com/rancher-sandbox/cluster-api-provider-rke2/releases/latest/ - File: control-plane-components.yaml Name: talos ProviderType: ControlPlaneProvider @@ -219,10 +265,22 @@ var expectedOutputYaml = `- File: core_components.yaml Name: hetzner ProviderType: InfrastructureProvider URL: https://github.com/syself/cluster-api-provider-hetzner/releases/latest/ +- File: infrastructure-components.yaml + Name: hivelocity-hivelocity + ProviderType: InfrastructureProvider + URL: 
https://github.com/hivelocity/cluster-api-provider-hivelocity/releases/latest/ - File: infrastructure-components.yaml Name: ibmcloud ProviderType: InfrastructureProvider URL: https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/releases/latest/ +- File: infrastructure-components-in-memory-development.yaml + Name: in-memory + ProviderType: InfrastructureProvider + URL: https://github.com/kubernetes-sigs/cluster-api/releases/latest/ +- File: infrastructure-components.yaml + Name: k0sproject-k0smotron + ProviderType: InfrastructureProvider + URL: https://github.com/k0sproject/k0smotron/releases/latest/ - File: infrastructure-components.yaml Name: kubekey ProviderType: InfrastructureProvider @@ -231,6 +289,10 @@ var expectedOutputYaml = `- File: core_components.yaml Name: kubevirt ProviderType: InfrastructureProvider URL: https://github.com/kubernetes-sigs/cluster-api-provider-kubevirt/releases/latest/ +- File: infrastructure-components.yaml + Name: linode-linode + ProviderType: InfrastructureProvider + URL: https://github.com/linode/cluster-api-provider-linode/releases/latest/ - File: infrastructure-components.yaml Name: maas ProviderType: InfrastructureProvider @@ -242,7 +304,7 @@ var expectedOutputYaml = `- File: core_components.yaml - File: infrastructure-components.yaml Name: my-infra-provider ProviderType: InfrastructureProvider - URL: /home/.cluster-api/overrides/infrastructure-docker/latest/ + URL: /home/.config/cluster-api/overrides/infrastructure-docker/latest/ - File: infrastructure-components.yaml Name: nested ProviderType: InfrastructureProvider @@ -267,10 +329,18 @@ var expectedOutputYaml = `- File: core_components.yaml Name: packet ProviderType: InfrastructureProvider URL: https://github.com/kubernetes-sigs/cluster-api-provider-packet/releases/latest/ +- File: infrastructure-components.yaml + Name: proxmox + ProviderType: InfrastructureProvider + URL: https://github.com/ionos-cloud/cluster-api-provider-proxmox/releases/latest/ - File: infrastructure-components.yaml Name: sidero ProviderType: InfrastructureProvider URL: https://github.com/siderolabs/sidero/releases/latest/ +- File: infrastructure-components.yaml + Name: tinkerbell-tinkerbell + ProviderType: InfrastructureProvider + URL: https://github.com/tinkerbell/cluster-api-provider-tinkerbell/releases/latest/ - File: infrastructure-components.yaml Name: vcd ProviderType: InfrastructureProvider @@ -287,4 +357,12 @@ var expectedOutputYaml = `- File: core_components.yaml Name: vsphere ProviderType: InfrastructureProvider URL: https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/latest/ +- File: ipam-components.yaml + Name: in-cluster + ProviderType: IPAMProvider + URL: https://github.com/kubernetes-sigs/cluster-api-ipam-provider-in-cluster/releases/latest/ +- File: addon-components.yaml + Name: helm + ProviderType: AddonProvider + URL: https://github.com/kubernetes-sigs/cluster-api-addon-provider-helm/releases/latest/ ` diff --git a/cmd/clusterctl/cmd/delete.go b/cmd/clusterctl/cmd/delete.go index 662a505b93b3..97a6e0c9a0e6 100644 --- a/cmd/clusterctl/cmd/delete.go +++ b/cmd/clusterctl/cmd/delete.go @@ -17,6 +17,8 @@ limitations under the License. 
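The delete.go hunk that follows is the first of many showing the same mechanical migration: every clusterctl command now builds a single context.Background() per invocation and threads it through client.New and the subsequent client call. A minimal sketch of the pattern, using the delete command's options (the DeleteAll value is illustrative, not the command's actual wiring):

package main

import (
	"context"

	"sigs.k8s.io/cluster-api/cmd/clusterctl/client"
)

// runDeleteSketch mirrors the context-threading pattern applied across the
// command files in this change: one context per invocation, passed first to
// client.New and then to every client method.
func runDeleteSketch(cfgFile string) error {
	ctx := context.Background()

	c, err := client.New(ctx, cfgFile) // previously client.New(cfgFile)
	if err != nil {
		return err
	}

	// Previously c.Delete(client.DeleteOptions{...}); the context is now the
	// first argument. DeleteAll here is just an example option.
	return c.Delete(ctx, client.DeleteOptions{
		DeleteAll: true,
	})
}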
package cmd import ( + "context" + "github.com/pkg/errors" "github.com/spf13/cobra" @@ -32,6 +34,7 @@ type deleteOptions struct { infrastructureProviders []string ipamProviders []string runtimeExtensionProviders []string + addonProviders []string includeNamespace bool includeCRDs bool deleteAll bool @@ -81,7 +84,7 @@ var deleteCmd = &cobra.Command{ # are "orphaned" and thus there may be ongoing costs incurred as a result of this. clusterctl delete --all --include-crd --include-namespace`), Args: cobra.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(*cobra.Command, []string) error { return runDelete() }, } @@ -109,6 +112,8 @@ func init() { "IPAM providers and versions (e.g. infoblox:v0.0.1) to delete from the management cluster") deleteCmd.Flags().StringSliceVar(&dd.runtimeExtensionProviders, "runtime-extension", nil, "Runtime extension providers and versions (e.g. test:v0.0.1) to delete from the management cluster") + deleteCmd.Flags().StringSliceVar(&dd.addonProviders, "addon", nil, + "Add-on providers and versions (e.g. helm:v0.1.0) to delete from the management cluster") deleteCmd.Flags().BoolVar(&dd.deleteAll, "all", false, "Force deletion of all the providers") @@ -117,7 +122,9 @@ func init() { } func runDelete() error { - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } @@ -127,17 +134,18 @@ func runDelete() error { (len(dd.controlPlaneProviders) > 0) || (len(dd.infrastructureProviders) > 0) || (len(dd.ipamProviders) > 0) || - (len(dd.runtimeExtensionProviders) > 0) + (len(dd.runtimeExtensionProviders) > 0) || + (len(dd.addonProviders) > 0) if dd.deleteAll && hasProviderNames { - return errors.New("The --all flag can't be used in combination with --core, --bootstrap, --control-plane, --infrastructure, --ipam, --extension") + return errors.New("The --all flag can't be used in combination with --core, --bootstrap, --control-plane, --infrastructure, --ipam, --extension, --addon") } if !dd.deleteAll && !hasProviderNames { - return errors.New("At least one of --core, --bootstrap, --control-plane, --infrastructure, --ipam, --extension should be specified or the --all flag should be set") + return errors.New("At least one of --core, --bootstrap, --control-plane, --infrastructure, --ipam, --extension, --addon should be specified or the --all flag should be set") } - return c.Delete(client.DeleteOptions{ + return c.Delete(ctx, client.DeleteOptions{ Kubeconfig: client.Kubeconfig{Path: dd.kubeconfig, Context: dd.kubeconfigContext}, IncludeNamespace: dd.includeNamespace, IncludeCRDs: dd.includeCRDs, @@ -147,6 +155,7 @@ func runDelete() error { ControlPlaneProviders: dd.controlPlaneProviders, IPAMProviders: dd.ipamProviders, RuntimeExtensionProviders: dd.runtimeExtensionProviders, + AddonProviders: dd.addonProviders, DeleteAll: dd.deleteAll, }) } diff --git a/cmd/clusterctl/cmd/describe_cluster.go b/cmd/clusterctl/cmd/describe_cluster.go index b73cdf3e75d0..7a7994376133 100644 --- a/cmd/clusterctl/cmd/describe_cluster.go +++ b/cmd/clusterctl/cmd/describe_cluster.go @@ -17,6 +17,7 @@ limitations under the License. package cmd import ( + "context" "fmt" "os" "sort" @@ -95,7 +96,7 @@ var describeClusterClusterCmd = &cobra.Command{ # also when their status is the same as the status of the corresponding machine object. 
clusterctl describe cluster test-1 --echo`), - Args: func(cmd *cobra.Command, args []string) error { + Args: func(_ *cobra.Command, args []string) error { if len(args) != 1 { return errors.New("please specify a cluster name") } @@ -146,12 +147,14 @@ func init() { } func runDescribeCluster(cmd *cobra.Command, name string) error { - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } - tree, err := c.DescribeCluster(client.DescribeClusterOptions{ + tree, err := c.DescribeCluster(ctx, client.DescribeClusterOptions{ Kubeconfig: client.Kubeconfig{Path: dc.kubeconfig, Context: dc.kubeconfigContext}, Namespace: dc.namespace, ClusterName: name, diff --git a/cmd/clusterctl/cmd/generate_cluster.go b/cmd/clusterctl/cmd/generate_cluster.go index 066a24a7e0cc..9b95911e5831 100644 --- a/cmd/clusterctl/cmd/generate_cluster.go +++ b/cmd/clusterctl/cmd/generate_cluster.go @@ -17,6 +17,7 @@ limitations under the License. package cmd import ( + "context" "fmt" "github.com/pkg/errors" @@ -55,7 +56,7 @@ var generateClusterClusterCmd = &cobra.Command{ Generate templates for creating workload clusters. clusterctl ships with a list of known providers; if necessary, edit - $HOME/.cluster-api/clusterctl.yaml to add new provider or to customize existing ones. + $XDG_CONFIG_HOME/cluster-api/clusterctl.yaml to add new provider or to customize existing ones. Each provider configuration links to a repository; clusterctl uses this information to fetch templates when creating a new cluster.`), @@ -91,7 +92,7 @@ var generateClusterClusterCmd = &cobra.Command{ # Prints the list of variables required by the yaml file for creating workload cluster. clusterctl generate cluster my-cluster --list-variables`), - Args: func(cmd *cobra.Command, args []string) error { + Args: func(_ *cobra.Command, args []string) error { if len(args) != 1 { return errors.New("please specify a cluster name") } @@ -112,11 +113,12 @@ func init() { generateClusterClusterCmd.Flags().StringVarP(&gc.targetNamespace, "target-namespace", "n", "", "The namespace to use for the workload cluster. If unspecified, the current namespace will be used.") generateClusterClusterCmd.Flags().StringVar(&gc.kubernetesVersion, "kubernetes-version", "", - "The Kubernetes version to use for the workload cluster. If unspecified, the value from OS environment variables or the .cluster-api/clusterctl.yaml config file will be used.") + "The Kubernetes version to use for the workload cluster. If unspecified, the value from OS environment variables or the $XDG_CONFIG_HOME/cluster-api/clusterctl.yaml config file will be used.") generateClusterClusterCmd.Flags().Int64Var(&gc.controlPlaneMachineCount, "control-plane-machine-count", 1, "The number of control plane machines for the workload cluster.") + // Remove default from hard coded text if the default is ever changed from 0 since cobra would then add it generateClusterClusterCmd.Flags().Int64Var(&gc.workerMachineCount, "worker-machine-count", 0, - "The number of worker machines for the workload cluster.") + "The number of worker machines for the workload cluster. 
(default 0)") // flags for the repository source generateClusterClusterCmd.Flags().StringVarP(&gc.infrastructureProvider, "infrastructure", "i", "", @@ -145,7 +147,9 @@ func init() { } func runGenerateClusterTemplate(cmd *cobra.Command, name string) error { - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } @@ -186,7 +190,7 @@ func runGenerateClusterTemplate(cmd *cobra.Command, name string) error { } } - template, err := c.GetClusterTemplate(templateOptions) + template, err := c.GetClusterTemplate(ctx, templateOptions) if err != nil { return err } diff --git a/cmd/clusterctl/cmd/generate_provider.go b/cmd/clusterctl/cmd/generate_provider.go index c9a335f0162f..187507d353ac 100644 --- a/cmd/clusterctl/cmd/generate_provider.go +++ b/cmd/clusterctl/cmd/generate_provider.go @@ -17,6 +17,8 @@ limitations under the License. package cmd import ( + "context" + "github.com/pkg/errors" "github.com/spf13/cobra" @@ -31,6 +33,7 @@ type generateProvidersOptions struct { infrastructureProvider string ipamProvider string runtimeExtensionProvider string + addonProvider string targetNamespace string textOutput bool raw bool @@ -71,7 +74,7 @@ var generateProviderCmd = &cobra.Command{ # No variables will be processed and substituted using this flag clusterctl generate provider --infrastructure aws:v0.4.1 --raw`), - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(*cobra.Command, []string) error { return runGenerateProviderComponents() }, } @@ -89,6 +92,8 @@ func init() { "IPAM provider and version (e.g. infoblox:v0.0.1)") generateProviderCmd.Flags().StringVar(&gpo.runtimeExtensionProvider, "runtime-extension", "", "Runtime extension provider and version (e.g. test:v0.0.1)") + generateProviderCmd.Flags().StringVar(&gpo.addonProvider, "addon", "", + "Add-on provider and version (e.g. helm:v0.1.0)") generateProviderCmd.Flags().StringVarP(&gpo.targetNamespace, "target-namespace", "n", "", "The target namespace where the provider should be deployed. 
If unspecified, the components default namespace is used.") generateProviderCmd.Flags().BoolVar(&gpo.textOutput, "describe", false, @@ -102,11 +107,13 @@ func init() { } func runGenerateProviderComponents() error { + ctx := context.Background() + providerName, providerType, err := parseProvider() if err != nil { return err } - c, err := client.New(cfgFile) + c, err := client.New(ctx, cfgFile) if err != nil { return err } @@ -116,7 +123,7 @@ func runGenerateProviderComponents() error { SkipTemplateProcess: gpo.raw || gpo.textOutput, } - components, err := c.GenerateProvider(providerName, providerType, options) + components, err := c.GenerateProvider(ctx, providerName, providerType, options) if err != nil { return err } @@ -134,41 +141,48 @@ func parseProvider() (string, clusterctlv1.ProviderType, error) { providerType := clusterctlv1.CoreProviderType if gpo.bootstrapProvider != "" { if providerName != "" { - return "", "", errors.New("only one of --core, --bootstrap, --control-plane, --infrastructure, --ipam, --extension should be set") + return "", "", errors.New("only one of --core, --bootstrap, --control-plane, --infrastructure, --ipam, --extension, --addon should be set") } providerName = gpo.bootstrapProvider providerType = clusterctlv1.BootstrapProviderType } if gpo.controlPlaneProvider != "" { if providerName != "" { - return "", "", errors.New("only one of --core, --bootstrap, --control-plane, --infrastructure, --ipam, --extension should be set") + return "", "", errors.New("only one of --core, --bootstrap, --control-plane, --infrastructure, --ipam, --extension, --addon should be set") } providerName = gpo.controlPlaneProvider providerType = clusterctlv1.ControlPlaneProviderType } if gpo.infrastructureProvider != "" { if providerName != "" { - return "", "", errors.New("only one of --core, --bootstrap, --control-plane, --infrastructure, --ipam, --extension should be set") + return "", "", errors.New("only one of --core, --bootstrap, --control-plane, --infrastructure, --ipam, --extension, --addon should be set") } providerName = gpo.infrastructureProvider providerType = clusterctlv1.InfrastructureProviderType } if gpo.ipamProvider != "" { if providerName != "" { - return "", "", errors.New("only one of --core, --bootstrap, --control-plane, --infrastructure, --ipam, --extension should be set") + return "", "", errors.New("only one of --core, --bootstrap, --control-plane, --infrastructure, --ipam, --extension, --addon should be set") } providerName = gpo.ipamProvider providerType = clusterctlv1.IPAMProviderType } if gpo.runtimeExtensionProvider != "" { if providerName != "" { - return "", "", errors.New("only one of --core, --bootstrap, --control-plane, --infrastructure, --ipam, --extension should be set") + return "", "", errors.New("only one of --core, --bootstrap, --control-plane, --infrastructure, --ipam, --extension, --addon should be set") } providerName = gpo.runtimeExtensionProvider providerType = clusterctlv1.RuntimeExtensionProviderType } + if gpo.addonProvider != "" { + if providerName != "" { + return "", "", errors.New("only one of --core, --bootstrap, --control-plane, --infrastructure, --ipam, --extension, --addon should be set") + } + providerName = gpo.addonProvider + providerType = clusterctlv1.AddonProviderType + } if providerName == "" { - return "", "", errors.New("at least one of --core, --bootstrap, --control-plane, --infrastructure, --ipam, --extension should be set") + return "", "", errors.New("at least one of --core, --bootstrap, --control-plane, 
--infrastructure, --ipam, --extension, --addon should be set") } return providerName, providerType, nil diff --git a/cmd/clusterctl/cmd/generate_yaml.go b/cmd/clusterctl/cmd/generate_yaml.go index cf09b38e023b..5be1e06be2ed 100644 --- a/cmd/clusterctl/cmd/generate_yaml.go +++ b/cmd/clusterctl/cmd/generate_yaml.go @@ -17,6 +17,7 @@ limitations under the License. package cmd import ( + "context" "fmt" "io" "os" @@ -61,7 +62,7 @@ var generateYamlCmd = &cobra.Command{ cat ~/workspace/cluster-template.yaml | clusterctl generate yaml --list-variables `), - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(*cobra.Command, []string) error { return generateYAML(os.Stdin, os.Stdout) }, } @@ -79,7 +80,9 @@ func init() { } func generateYAML(r io.Reader, w io.Writer) error { - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } @@ -97,7 +100,7 @@ func generateYAML(r io.Reader, w io.Writer) error { } } } - printer, err := c.ProcessYAML(options) + printer, err := c.ProcessYAML(ctx, options) if err != nil { return err } diff --git a/cmd/clusterctl/cmd/generate_yaml_test.go b/cmd/clusterctl/cmd/generate_yaml_test.go index ccab5a914002..1b01ed1532e7 100644 --- a/cmd/clusterctl/cmd/generate_yaml_test.go +++ b/cmd/clusterctl/cmd/generate_yaml_test.go @@ -118,7 +118,7 @@ v3: default3 // the filepath and a cleanup function for the temp directory. func createTempFile(g *WithT, contents string) (string, func()) { dir, err := os.MkdirTemp("", "clusterctl") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) templateFile := filepath.Join(dir, "templ.yaml") g.Expect(os.WriteFile(templateFile, []byte(contents), 0600)).To(Succeed()) diff --git a/cmd/clusterctl/cmd/get_kubeconfig.go b/cmd/clusterctl/cmd/get_kubeconfig.go index e2e95fda048a..e580b97eb85b 100644 --- a/cmd/clusterctl/cmd/get_kubeconfig.go +++ b/cmd/clusterctl/cmd/get_kubeconfig.go @@ -17,6 +17,7 @@ limitations under the License. package cmd import ( + "context" "fmt" "github.com/pkg/errors" @@ -47,13 +48,13 @@ var getKubeconfigCmd = &cobra.Command{ # Get the workload cluster's kubeconfig in a particular namespace. clusterctl get kubeconfig --namespace foo`), - Args: func(cmd *cobra.Command, args []string) error { + Args: func(_ *cobra.Command, args []string) error { if len(args) != 1 { return errors.New("please specify a workload cluster name") } return nil }, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) error { return runGetKubeconfig(args[0]) }, } @@ -79,7 +80,9 @@ func init() { } func runGetKubeconfig(workloadClusterName string) error { - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } @@ -90,7 +93,7 @@ func runGetKubeconfig(workloadClusterName string) error { Namespace: gk.namespace, } - out, err := c.GetKubeconfig(options) + out, err := c.GetKubeconfig(ctx, options) if err != nil { return err } diff --git a/cmd/clusterctl/cmd/init.go b/cmd/clusterctl/cmd/init.go index 9d6eafbdbf9d..18dd3edab328 100644 --- a/cmd/clusterctl/cmd/init.go +++ b/cmd/clusterctl/cmd/init.go @@ -17,6 +17,7 @@ limitations under the License. 
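parseProvider above now guards seven mutually exclusive provider flags rather than six, and every error message gains --addon. The repeated `if providerName != ""` branches can be read as a single ordered scan; a condensed sketch of that logic (the table-driven loop is a restatement for clarity, not the verbatim implementation):

package main

import (
	"github.com/pkg/errors"

	clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
)

// pickProvider restates parseProvider's mutual-exclusion rule as an ordered
// scan: the first non-empty flag wins, any second non-empty flag is an error,
// and at least one flag must be set.
func pickProvider(flags map[clusterctlv1.ProviderType]string) (string, clusterctlv1.ProviderType, error) {
	order := []clusterctlv1.ProviderType{
		clusterctlv1.CoreProviderType,
		clusterctlv1.BootstrapProviderType,
		clusterctlv1.ControlPlaneProviderType,
		clusterctlv1.InfrastructureProviderType,
		clusterctlv1.IPAMProviderType,
		clusterctlv1.RuntimeExtensionProviderType,
		clusterctlv1.AddonProviderType,
	}

	name, providerType := "", clusterctlv1.CoreProviderType
	for _, t := range order {
		if v := flags[t]; v != "" {
			if name != "" {
				return "", "", errors.New("only one provider flag should be set")
			}
			name, providerType = v, t
		}
	}
	if name == "" {
		return "", "", errors.New("at least one provider flag should be set")
	}
	return name, providerType, nil
}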
package cmd import ( + "context" "time" "github.com/spf13/cobra" @@ -33,6 +34,7 @@ type initOptions struct { infrastructureProviders []string ipamProviders []string runtimeExtensionProviders []string + addonProviders []string targetNamespace string validate bool waitProviders bool @@ -54,11 +56,12 @@ var initCmd = &cobra.Command{ The management cluster must be an existing Kubernetes cluster, make sure to have enough privileges to install the desired components. - Use 'clusterctl config repositories' to get a list of available providers; if necessary, edit - $HOME/.cluster-api/clusterctl.yaml file to add new provider or to customize existing ones. + Use 'clusterctl config repositories' to get a list of available providers and their configuration; if + necessary, edit $XDG_CONFIG_HOME/cluster-api/clusterctl.yaml file to add new provider or to customize existing ones. Some providers require environment variables to be set before running clusterctl init. - Refer to the provider documentation, or use 'clusterctl config provider [name]' to get a list of required variables. + Refer to the provider documentation, or use 'clusterctl generate provider --infrastructure [name] --describe' + to get a list of required variables. See https://cluster-api.sigs.k8s.io for more details.`), @@ -73,7 +76,7 @@ var initCmd = &cobra.Command{ clusterctl init --infrastructure=aws:v0.4.1 # Initialize a management cluster with a custom kubeconfig path and the given infrastructure provider. - clusterctl init --kubeconfig=foo.yaml --infrastructure=aws + clusterctl init --kubeconfig=foo.yaml --infrastructure=aws # Initialize a management cluster with multiple infrastructure providers. clusterctl init --infrastructure=aws,vsphere @@ -81,7 +84,7 @@ var initCmd = &cobra.Command{ # Initialize a management cluster with a custom target namespace for the provider resources. clusterctl init --infrastructure aws --target-namespace foo`), Args: cobra.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(*cobra.Command, []string) error { return runInit() }, } @@ -100,9 +103,11 @@ func init() { initCmd.PersistentFlags().StringSliceVarP(&initOpts.controlPlaneProviders, "control-plane", "c", nil, "Control plane providers and versions (e.g. kubeadm:v1.1.5) to add to the management cluster. If unspecified, the Kubeadm control plane provider's latest release is used.") initCmd.PersistentFlags().StringSliceVar(&initOpts.ipamProviders, "ipam", nil, - "IPAM providers and versions (e.g. infoblox:v0.0.1) to add to the management cluster.") + "IPAM providers and versions (e.g. in-cluster:v0.1.0) to add to the management cluster.") initCmd.PersistentFlags().StringSliceVar(&initOpts.runtimeExtensionProviders, "runtime-extension", nil, - "Runtime extension providers and versions (e.g. test:v0.0.1) to add to the management cluster.") + "Runtime extension providers and versions to add to the management cluster; please note that clusterctl doesn't include any default runtime extensions and thus it is required to use custom configuration files to register runtime extensions.") + initCmd.PersistentFlags().StringSliceVar(&initOpts.addonProviders, "addon", nil, + "Add-on providers and versions (e.g. helm:v0.1.0) to add to the management cluster.") initCmd.Flags().StringVarP(&initOpts.targetNamespace, "target-namespace", "n", "", "The target namespace where the providers should be deployed. 
If unspecified, the provider components' default namespace is used.") initCmd.Flags().BoolVar(&initOpts.waitProviders, "wait-providers", false, @@ -117,7 +122,9 @@ func init() { } func runInit() error { - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } @@ -130,6 +137,7 @@ func runInit() error { InfrastructureProviders: initOpts.infrastructureProviders, IPAMProviders: initOpts.ipamProviders, RuntimeExtensionProviders: initOpts.runtimeExtensionProviders, + AddonProviders: initOpts.addonProviders, TargetNamespace: initOpts.targetNamespace, LogUsageInstructions: true, WaitProviders: initOpts.waitProviders, @@ -137,7 +145,7 @@ func runInit() error { IgnoreValidationErrors: !initOpts.validate, } - if _, err := c.Init(options); err != nil { + if _, err := c.Init(ctx, options); err != nil { return err } return nil diff --git a/cmd/clusterctl/cmd/init_list_images.go b/cmd/clusterctl/cmd/init_list_images.go index f087fa804e84..f8b6eb4e26ce 100644 --- a/cmd/clusterctl/cmd/init_list_images.go +++ b/cmd/clusterctl/cmd/init_list_images.go @@ -17,6 +17,7 @@ limitations under the License. package cmd import ( + "context" "fmt" "github.com/spf13/cobra" @@ -42,13 +43,15 @@ var initListImagesCmd = &cobra.Command{ clusterctl init list-images --infrastructure vcd --bootstrap kubeadm --control-plane nested --core cluster-api:v1.2.0 `), Args: cobra.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(*cobra.Command, []string) error { return runInitListImages() }, } func runInitListImages() error { - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } @@ -61,10 +64,11 @@ func runInitListImages() error { InfrastructureProviders: initOpts.infrastructureProviders, IPAMProviders: initOpts.ipamProviders, RuntimeExtensionProviders: initOpts.runtimeExtensionProviders, + AddonProviders: initOpts.addonProviders, LogUsageInstructions: false, } - images, err := c.InitImages(options) + images, err := c.InitImages(ctx, options) if err != nil { return err } diff --git a/cmd/clusterctl/cmd/move.go b/cmd/clusterctl/cmd/move.go index c75557e0aa10..2a51bf2c0d9b 100644 --- a/cmd/clusterctl/cmd/move.go +++ b/cmd/clusterctl/cmd/move.go @@ -17,6 +17,8 @@ limitations under the License. 
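With --addon wired into init above, the equivalent library call simply adds AddonProviders next to the existing provider lists. A hedged sketch (the provider versions follow the flag help text and are examples only):

package main

import (
	"context"

	"sigs.k8s.io/cluster-api/cmd/clusterctl/client"
)

// initWithAddon mirrors `clusterctl init --infrastructure docker --addon helm:v0.1.0`.
func initWithAddon(cfgFile string) error {
	ctx := context.Background()

	c, err := client.New(ctx, cfgFile)
	if err != nil {
		return err
	}

	// AddonProviders is the new field; the others already existed.
	_, err = c.Init(ctx, client.InitOptions{
		InfrastructureProviders: []string{"docker"},
		AddonProviders:          []string{"helm:v0.1.0"},
		WaitProviders:           true,
	})
	return err
}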
package cmd import ( + "context" + "github.com/pkg/errors" "github.com/spf13/cobra" @@ -56,7 +58,7 @@ var moveCmd = &cobra.Command{ clusterctl move --from-directory /tmp/backup-directory `), Args: cobra.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(*cobra.Command, []string) error { return runMove() }, } @@ -87,6 +89,8 @@ func init() { } func runMove() error { + ctx := context.Background() + if mo.toDirectory == "" && mo.fromDirectory == "" && mo.toKubeconfig == "" && @@ -94,12 +98,12 @@ func runMove() error { return errors.New("please specify a target cluster using the --to-kubeconfig flag when not using --dry-run, --to-directory or --from-directory") } - c, err := client.New(cfgFile) + c, err := client.New(ctx, cfgFile) if err != nil { return err } - return c.Move(client.MoveOptions{ + return c.Move(ctx, client.MoveOptions{ FromKubeconfig: client.Kubeconfig{Path: mo.fromKubeconfig, Context: mo.fromKubeconfigContext}, ToKubeconfig: client.Kubeconfig{Path: mo.toKubeconfig, Context: mo.toKubeconfigContext}, FromDirectory: mo.fromDirectory, diff --git a/cmd/clusterctl/cmd/rollout/pause.go b/cmd/clusterctl/cmd/rollout/pause.go index 8e29ed200fec..9c72bed80d41 100644 --- a/cmd/clusterctl/cmd/rollout/pause.go +++ b/cmd/clusterctl/cmd/rollout/pause.go @@ -18,6 +18,8 @@ limitations under the License. package rollout import ( + "context" + "github.com/spf13/cobra" "k8s.io/kubectl/pkg/util/templates" @@ -56,7 +58,7 @@ func NewCmdRolloutPause(cfgFile string) *cobra.Command { Short: "Pause a cluster-api resource", Long: pauseLong, Example: pauseExample, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) error { return runPause(cfgFile, args) }, } @@ -72,12 +74,14 @@ func NewCmdRolloutPause(cfgFile string) *cobra.Command { func runPause(cfgFile string, args []string) error { pauseOpt.resources = args - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } - return c.RolloutPause(client.RolloutPauseOptions{ + return c.RolloutPause(ctx, client.RolloutPauseOptions{ Kubeconfig: client.Kubeconfig{Path: pauseOpt.kubeconfig, Context: pauseOpt.kubeconfigContext}, Namespace: pauseOpt.namespace, Resources: pauseOpt.resources, diff --git a/cmd/clusterctl/cmd/rollout/restart.go b/cmd/clusterctl/cmd/rollout/restart.go index 7f46fac317fd..b582c6c46411 100644 --- a/cmd/clusterctl/cmd/rollout/restart.go +++ b/cmd/clusterctl/cmd/rollout/restart.go @@ -17,6 +17,8 @@ limitations under the License. package rollout import ( + "context" + "github.com/spf13/cobra" "k8s.io/kubectl/pkg/util/templates" @@ -71,12 +73,14 @@ func NewCmdRolloutRestart(cfgFile string) *cobra.Command { func runRestart(cfgFile string, _ *cobra.Command, args []string) error { restartOpt.resources = args - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } - return c.RolloutRestart(client.RolloutRestartOptions{ + return c.RolloutRestart(ctx, client.RolloutRestartOptions{ Kubeconfig: client.Kubeconfig{Path: restartOpt.kubeconfig, Context: restartOpt.kubeconfigContext}, Namespace: restartOpt.namespace, Resources: restartOpt.resources, diff --git a/cmd/clusterctl/cmd/rollout/resume.go b/cmd/clusterctl/cmd/rollout/resume.go index 07dc4bc1fa5f..393a8fc242a7 100644 --- a/cmd/clusterctl/cmd/rollout/resume.go +++ b/cmd/clusterctl/cmd/rollout/resume.go @@ -17,6 +17,8 @@ limitations under the License. 
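runMove above accepts three mutually exclusive destinations: a target kubeconfig, --to-directory for writing a backup, or --from-directory for restoring one. A sketch of the two directory-based directions, assuming MoveOptions exposes a ToDirectory field to match the --to-directory flag (only FromDirectory is visible in this hunk), with an illustrative /tmp path:

package main

import (
	"context"

	"sigs.k8s.io/cluster-api/cmd/clusterctl/client"
)

// backupAndRestore sketches both directions of clusterctl move.
func backupAndRestore(cfgFile string) error {
	ctx := context.Background()

	c, err := client.New(ctx, cfgFile)
	if err != nil {
		return err
	}

	// Equivalent of `clusterctl move --to-directory /tmp/backup-directory`.
	if err := c.Move(ctx, client.MoveOptions{
		ToDirectory: "/tmp/backup-directory",
	}); err != nil {
		return err
	}

	// Equivalent of `clusterctl move --from-directory /tmp/backup-directory`.
	return c.Move(ctx, client.MoveOptions{
		FromDirectory: "/tmp/backup-directory",
	})
}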
package rollout import ( + "context" + "github.com/spf13/cobra" "k8s.io/kubectl/pkg/util/templates" @@ -55,7 +57,7 @@ func NewCmdRolloutResume(cfgFile string) *cobra.Command { Short: "Resume a cluster-api resource", Long: resumeLong, Example: resumeExample, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) error { return runResume(cfgFile, args) }, } @@ -71,12 +73,14 @@ func NewCmdRolloutResume(cfgFile string) *cobra.Command { func runResume(cfgFile string, args []string) error { resumeOpt.resources = args - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } - return c.RolloutResume(client.RolloutResumeOptions{ + return c.RolloutResume(ctx, client.RolloutResumeOptions{ Kubeconfig: client.Kubeconfig{Path: resumeOpt.kubeconfig, Context: resumeOpt.kubeconfigContext}, Namespace: resumeOpt.namespace, Resources: resumeOpt.resources, diff --git a/cmd/clusterctl/cmd/rollout/undo.go b/cmd/clusterctl/cmd/rollout/undo.go index cfa6603009b4..aec6af75c580 100644 --- a/cmd/clusterctl/cmd/rollout/undo.go +++ b/cmd/clusterctl/cmd/rollout/undo.go @@ -17,6 +17,8 @@ limitations under the License. package rollout import ( + "context" + "github.com/spf13/cobra" "k8s.io/kubectl/pkg/util/templates" @@ -54,7 +56,7 @@ func NewCmdRolloutUndo(cfgFile string) *cobra.Command { Short: "Undo a cluster-api resource", Long: undoLong, Example: undoExample, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) error { return runUndo(cfgFile, args) }, } @@ -71,12 +73,14 @@ func NewCmdRolloutUndo(cfgFile string) *cobra.Command { func runUndo(cfgFile string, args []string) error { undoOpt.resources = args - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } - return c.RolloutUndo(client.RolloutUndoOptions{ + return c.RolloutUndo(ctx, client.RolloutUndoOptions{ Kubeconfig: client.Kubeconfig{Path: undoOpt.kubeconfig, Context: undoOpt.kubeconfigContext}, Namespace: undoOpt.namespace, Resources: undoOpt.resources, diff --git a/cmd/clusterctl/cmd/root.go b/cmd/clusterctl/cmd/root.go index 517a42024e71..89366a78ddd4 100644 --- a/cmd/clusterctl/cmd/root.go +++ b/cmd/clusterctl/cmd/root.go @@ -17,6 +17,7 @@ limitations under the License. 
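The root.go changes that follow replace the hand-rolled $HOME/.cluster-api bootstrap with github.com/adrg/xdg, which resolves $XDG_CONFIG_HOME (or the platform default) and creates missing parent directories itself, making the old PersistentPreRunE mkdir unnecessary. A minimal sketch of the lookup; the literal "cluster-api" stands in for config.ConfigFolderXDG, whose exact value is not shown in this diff:

package main

import (
	"fmt"
	"path/filepath"

	"github.com/adrg/xdg"
)

func main() {
	// xdg.ConfigFile resolves a path relative to the user's config home and
	// ensures the parent directories exist, replacing the manual os.MkdirAll.
	dir, err := xdg.ConfigFile("cluster-api")
	if err != nil {
		panic(err)
	}
	// e.g. /home/user/.config/cluster-api/clusterctl.yaml on Linux.
	fmt.Println(filepath.Join(dir, "clusterctl.yaml"))
}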
package cmd import ( + "context" "flag" "fmt" "os" @@ -25,9 +26,11 @@ import ( "strings" "github.com/MakeNowJust/heredoc" + "github.com/adrg/xdg" "github.com/pkg/errors" "github.com/spf13/cobra" - "k8s.io/client-go/util/homedir" + kubectlcmd "k8s.io/kubectl/pkg/cmd" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/config" logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log" @@ -56,20 +59,12 @@ var RootCmd = &cobra.Command{ Long: LongDesc(` Get started with Cluster API using clusterctl to create a management cluster, install providers, and create templates for your workload cluster.`), - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - // Check if Config folder (~/.cluster-api) exist and if not create it - configFolderPath := filepath.Join(homedir.HomeDir(), config.ConfigFolder) - if _, err := os.Stat(configFolderPath); os.IsNotExist(err) { - if err := os.MkdirAll(filepath.Dir(configFolderPath), os.ModePerm); err != nil { - return errors.Wrapf(err, "failed to create the clusterctl config directory: %s", configFolderPath) - } - } - return nil - }, - PersistentPostRunE: func(cmd *cobra.Command, args []string) error { + PersistentPostRunE: func(*cobra.Command, []string) error { + ctx := context.Background() + // Check if clusterctl needs an upgrade "AFTER" running each command // and sub-command. - configClient, err := config.New(cfgFile) + configClient, err := config.New(ctx, cfgFile) if err != nil { return err } @@ -78,7 +73,11 @@ var RootCmd = &cobra.Command{ // version check is disabled. Return early. return nil } - output, err := newVersionChecker(configClient.Variables()).Check() + checker, err := newVersionChecker(ctx, configClient.Variables()) + if err != nil { + return err + } + output, err := checker.Check(ctx) if err != nil { return errors.Wrap(err, "unable to verify clusterctl version") } @@ -87,8 +86,13 @@ var RootCmd = &cobra.Command{ fmt.Fprintf(os.Stderr, "\033[33m%s\033[0m", output) } + configDirectory, err := xdg.ConfigFile(config.ConfigFolderXDG) + if err != nil { + return err + } + // clean the downloaded config if was fetched from remote - downloadConfigFile := filepath.Join(homedir.HomeDir(), config.ConfigFolder, config.DownloadConfigFile) + downloadConfigFile := filepath.Join(configDirectory, config.DownloadConfigFile) if _, err := os.Stat(downloadConfigFile); err == nil { if verbosity != nil && *verbosity >= 5 { fmt.Fprintf(os.Stdout, "Removing downloaded clusterctl config file: %s\n", config.DownloadConfigFile) @@ -102,6 +106,8 @@ var RootCmd = &cobra.Command{ // Execute executes the root command. func Execute() { + handlePlugins() + if err := RootCmd.Execute(); err != nil { if verbosity != nil && *verbosity >= 5 { if err, ok := err.(stackTracer); ok { @@ -122,7 +128,7 @@ func init() { RootCmd.PersistentFlags().AddGoFlagSet(flag.CommandLine) RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", - "Path to clusterctl configuration (default is `$HOME/.cluster-api/clusterctl.yaml`) or to a remote location (i.e. https://example.com/clusterctl.yaml)") + "Path to clusterctl configuration (default is `$XDG_CONFIG_HOME/cluster-api/clusterctl.yaml`) or to a remote location (i.e. 
https://example.com/clusterctl.yaml)") RootCmd.AddGroup( &cobra.Group{ @@ -145,9 +151,11 @@ func init() { } func initConfig() { + ctx := context.Background() + // check if the CLUSTERCTL_LOG_LEVEL was set via env var or in the config file if *verbosity == 0 { - configClient, err := config.New(cfgFile) + configClient, err := config.New(ctx, cfgFile) if err == nil { v, err := configClient.Variables().Get("CLUSTERCTL_LOG_LEVEL") if err == nil && v != "" { @@ -161,7 +169,9 @@ func initConfig() { } } - logf.SetLogger(logf.NewLogger(logf.WithThreshold(verbosity))) + log := logf.NewLogger(logf.WithThreshold(verbosity)) + logf.SetLogger(log) + ctrl.SetLogger(log) } func registerCompletionFuncForCommonFlags() { @@ -182,6 +192,39 @@ func registerCompletionFuncForCommonFlags() { }) } +func handlePlugins() { + args := os.Args + pluginHandler := kubectlcmd.NewDefaultPluginHandler([]string{"clusterctl"}) + if len(args) > 1 { + cmdPathPieces := args[1:] + + // only look for suitable extension executables if + // the specified command does not already exist + if _, _, err := RootCmd.Find(cmdPathPieces); err != nil { + // Also check the commands that will be added by Cobra. + // These commands are only added once rootCmd.Execute() is called, so we + // need to check them explicitly here. + var cmdName string // first "non-flag" arguments + for _, arg := range cmdPathPieces { + if !strings.HasPrefix(arg, "-") { + cmdName = arg + break + } + } + + switch cmdName { + case "help", cobra.ShellCompRequestCmd, cobra.ShellCompNoDescRequestCmd: + // Don't search for a plugin + default: + if err := kubectlcmd.HandlePluginCommand(pluginHandler, cmdPathPieces, false); err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + os.Exit(1) + } + } + } + } +} + const indentation = ` ` // LongDesc normalizes a command's long description to follow the conventions. diff --git a/cmd/clusterctl/cmd/topology_plan.go b/cmd/clusterctl/cmd/topology_plan.go index 8a8ede41bfe5..9111e00b6a69 100644 --- a/cmd/clusterctl/cmd/topology_plan.go +++ b/cmd/clusterctl/cmd/topology_plan.go @@ -17,6 +17,8 @@ limitations under the License. 
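handlePlugins above adopts kubectl's plugin convention: when the first non-flag argument is not a known subcommand (and is not help or a shell-completion request), clusterctl looks for an executable named clusterctl-<arg> on PATH and dispatches to it. A hypothetical plugin is nothing more than a binary with that name prefix; "foo" here is an invented example:

package main

import (
	"fmt"
	"os"
)

// Compile this as an executable named "clusterctl-foo" and place it on PATH;
// `clusterctl foo <args>` is then routed here by the plugin handler that
// Execute() registers via kubectlcmd.NewDefaultPluginHandler.
func main() {
	fmt.Printf("clusterctl-foo called with args: %v\n", os.Args[1:])
}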
package cmd import ( + "context" + "errors" "fmt" "io" "os" @@ -27,7 +29,7 @@ import ( "strings" "github.com/olekukonko/tablewriter" - "github.com/pkg/errors" + pkgerrors "github.com/pkg/errors" "github.com/spf13/cobra" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/utils/exec" @@ -82,7 +84,7 @@ var topologyPlanCmd = &cobra.Command{ clusterctl alpha topology plan -f modified-template.yaml -o output/ `), Args: cobra.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(*cobra.Command, []string) error { return runTopologyPlan() }, } @@ -105,11 +107,15 @@ func init() { panic(err) } + topologyPlanCmd.Deprecated = "it will be removed in one of the upcoming releases.\n" + topologyCmd.AddCommand(topologyPlanCmd) } func runTopologyPlan() error { - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } @@ -118,16 +124,16 @@ func runTopologyPlan() error { for _, f := range tp.files { raw, err := os.ReadFile(f) //nolint:gosec if err != nil { - return errors.Wrapf(err, "failed to read input file %q", f) + return pkgerrors.Wrapf(err, "failed to read input file %q", f) } objects, err := utilyaml.ToUnstructured(raw) if err != nil { - return errors.Wrapf(err, "failed to convert file %q to list of objects", f) + return pkgerrors.Wrapf(err, "failed to convert file %q to list of objects", f) } objs = append(objs, objects...) } - out, err := c.TopologyPlan(client.TopologyPlanOptions{ + out, err := c.TopologyPlan(ctx, client.TopologyPlanOptions{ Kubeconfig: client.Kubeconfig{Path: tp.kubeconfig, Context: tp.kubeconfigContext}, Objs: convertToPtrSlice(objs), Cluster: tp.cluster, @@ -151,7 +157,7 @@ func printTopologyPlanOutput(out *cluster.TopologyPlanOutput, outdir string) err } else { printChangeSummary(out) if err := writeOutputFiles(out, outdir); err != nil { - return errors.Wrap(err, "failed to write output files of target cluster changes") + return pkgerrors.Wrap(err, "failed to write output files of target cluster changes") } } fmt.Printf("\n") @@ -230,17 +236,17 @@ func writeOutputFiles(out *cluster.TopologyPlanOutput, outDir string) error { // Write created files createdDir := path.Join(outDir, "created") if err := os.MkdirAll(createdDir, 0750); err != nil { - return errors.Wrapf(err, "failed to create %q directory", createdDir) + return pkgerrors.Wrapf(err, "failed to create %q directory", createdDir) } for _, c := range out.Created { yaml, err := utilyaml.FromUnstructured([]unstructured.Unstructured{*c}) if err != nil { - return errors.Wrap(err, "failed to convert object to yaml") + return pkgerrors.Wrap(err, "failed to convert object to yaml") } fileName := fmt.Sprintf("%s_%s_%s.yaml", c.GetKind(), c.GetNamespace(), c.GetName()) filePath := path.Join(createdDir, fileName) if err := os.WriteFile(filePath, yaml, 0600); err != nil { - return errors.Wrapf(err, "failed to write yaml to file %q", filePath) + return pkgerrors.Wrapf(err, "failed to write yaml to file %q", filePath) } } if len(out.Created) != 0 { @@ -250,33 +256,33 @@ func writeOutputFiles(out *cluster.TopologyPlanOutput, outDir string) error { // Write modified files modifiedDir := path.Join(outDir, "modified") if err := os.MkdirAll(modifiedDir, 0750); err != nil { - return errors.Wrapf(err, "failed to create %q directory", modifiedDir) + return pkgerrors.Wrapf(err, "failed to create %q directory", modifiedDir) } for _, m := range out.Modified { // Write the modified object to file. 
fileNameModified := fmt.Sprintf("%s_%s_%s.modified.yaml", m.After.GetKind(), m.After.GetNamespace(), m.After.GetName()) filePathModified := path.Join(modifiedDir, fileNameModified) if err := writeObjectToFile(filePathModified, m.After); err != nil { - return errors.Wrap(err, "failed to write modified object to file") + return pkgerrors.Wrap(err, "failed to write modified object to file") } // Write the original object to file. fileNameOriginal := fmt.Sprintf("%s_%s_%s.original.yaml", m.Before.GetKind(), m.Before.GetNamespace(), m.Before.GetName()) filePathOriginal := path.Join(modifiedDir, fileNameOriginal) if err := writeObjectToFile(filePathOriginal, m.Before); err != nil { - return errors.Wrap(err, "failed to write original object to file") + return pkgerrors.Wrap(err, "failed to write original object to file") } // Calculate the jsonpatch and write to a file. patch := crclient.MergeFrom(m.Before) jsonPatch, err := patch.Data(m.After) if err != nil { - return errors.Wrapf(err, "failed to calculate jsonpatch of modified object %s/%s", m.After.GetNamespace(), m.After.GetName()) + return pkgerrors.Wrapf(err, "failed to calculate jsonpatch of modified object %s/%s", m.After.GetNamespace(), m.After.GetName()) } patchFileName := fmt.Sprintf("%s_%s_%s.jsonpatch", m.After.GetKind(), m.After.GetNamespace(), m.After.GetName()) patchFilePath := path.Join(modifiedDir, patchFileName) if err := os.WriteFile(patchFilePath, jsonPatch, 0600); err != nil { - return errors.Wrapf(err, "failed to write jsonpatch to file %q", patchFilePath) + return pkgerrors.Wrapf(err, "failed to write jsonpatch to file %q", patchFilePath) } // Calculate the diff and write to a file. @@ -284,10 +290,10 @@ func writeOutputFiles(out *cluster.TopologyPlanOutput, outDir string) error { diffFilePath := path.Join(modifiedDir, diffFileName) diffFile, err := os.OpenFile(filepath.Clean(diffFilePath), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { - return errors.Wrapf(err, "unable to open file %q", diffFilePath) + return pkgerrors.Wrapf(err, "unable to open file %q", diffFilePath) } if err := writeDiffToFile(filePathOriginal, filePathModified, diffFile); err != nil { - return errors.Wrapf(err, "failed to write diff to file %q", diffFilePath) + return pkgerrors.Wrapf(err, "failed to write diff to file %q", diffFilePath) } } if len(out.Modified) != 0 { @@ -300,10 +306,10 @@ func writeOutputFiles(out *cluster.TopologyPlanOutput, outDir string) error { func writeObjectToFile(filePath string, obj *unstructured.Unstructured) error { yaml, err := utilyaml.FromUnstructured([]unstructured.Unstructured{*obj}) if err != nil { - return errors.Wrap(err, "failed to convert object to yaml") + return pkgerrors.Wrap(err, "failed to convert object to yaml") } if err := os.WriteFile(filePath, yaml, 0600); err != nil { - return errors.Wrapf(err, "failed to write yaml to file %q", filePath) + return pkgerrors.Wrapf(err, "failed to write yaml to file %q", filePath) } return nil } @@ -345,7 +351,7 @@ func writeDiffToFile(from, to string, out io.Writer) error { cmd.SetStdout(out) if err := cmd.Run(); err != nil && !isDiffError(err) { - return errors.Wrapf(err, "failed to run %q", diff) + return pkgerrors.Wrapf(err, "failed to run %q", diff) } return nil } @@ -379,7 +385,8 @@ func getDiffCommand(args ...string) (string, exec.Cmd) { // This makes use of the exit code of diff programs which is 0 for no diff, 1 for // modified and 2 for other errors. 
func isDiffError(err error) bool { - if err, ok := err.(exec.ExitError); ok && err.ExitStatus() <= 1 { + var exitErr exec.ExitError + if errors.As(err, &exitErr) && exitErr.ExitStatus() <= 1 { return true } return false diff --git a/cmd/clusterctl/cmd/upgrade.go b/cmd/clusterctl/cmd/upgrade.go index 2a4841fcc3af..7721d8a04c8e 100644 --- a/cmd/clusterctl/cmd/upgrade.go +++ b/cmd/clusterctl/cmd/upgrade.go @@ -29,7 +29,7 @@ var upgradeCmd = &cobra.Command{ GroupID: groupManagement, Short: "Upgrade core and provider components in a management cluster", Args: cobra.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { return cmd.Help() }, } diff --git a/cmd/clusterctl/cmd/upgrade_apply.go b/cmd/clusterctl/cmd/upgrade_apply.go index 41175669f4f7..402df535405d 100644 --- a/cmd/clusterctl/cmd/upgrade_apply.go +++ b/cmd/clusterctl/cmd/upgrade_apply.go @@ -17,6 +17,7 @@ limitations under the License. package cmd import ( + "context" "time" "github.com/pkg/errors" @@ -35,6 +36,7 @@ type upgradeApplyOptions struct { infrastructureProviders []string ipamProviders []string runtimeExtensionProviders []string + addonProviders []string waitProviders bool waitProviderTimeout int } @@ -60,7 +62,7 @@ var upgradeApplyCmd = &cobra.Command{ # Upgrades only the aws provider to the v2.0.1 version. clusterctl upgrade apply --infrastructure aws:v2.0.1`), Args: cobra.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(*cobra.Command, []string) error { return runUpgradeApply() }, } @@ -85,6 +87,8 @@ func init() { "IPAM providers and versions (e.g. infoblox:v0.0.1) to upgrade to. This flag can be used as alternative to --contract.") upgradeApplyCmd.Flags().StringSliceVar(&ua.runtimeExtensionProviders, "runtime-extension", nil, "Runtime extension providers and versions (e.g. test:v0.0.1) to upgrade to. This flag can be used as alternative to --contract.") + upgradeApplyCmd.Flags().StringSliceVar(&ua.addonProviders, "addon", nil, + "Add-on providers and versions (e.g. helm:v0.1.0) to upgrade to. 
This flag can be used as alternative to --contract.") upgradeApplyCmd.Flags().BoolVar(&ua.waitProviders, "wait-providers", false, "Wait for providers to be upgraded.") upgradeApplyCmd.Flags().IntVar(&ua.waitProviderTimeout, "wait-provider-timeout", 5*60, @@ -92,7 +96,9 @@ func init() { } func runUpgradeApply() error { - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } @@ -102,16 +108,17 @@ func runUpgradeApply() error { (len(ua.controlPlaneProviders) > 0) || (len(ua.infrastructureProviders) > 0) || (len(ua.ipamProviders) > 0) || - (len(ua.runtimeExtensionProviders) > 0) + (len(ua.runtimeExtensionProviders) > 0) || + (len(ua.addonProviders) > 0) if ua.contract == "" && !hasProviderNames { - return errors.New("Either the --contract flag or at least one of the following flags has to be set: --core, --bootstrap, --control-plane, --infrastructure, --ipam, --extension") + return errors.New("Either the --contract flag or at least one of the following flags has to be set: --core, --bootstrap, --control-plane, --infrastructure, --ipam, --extension, --addon") } if ua.contract != "" && hasProviderNames { - return errors.New("The --contract flag can't be used in combination with --core, --bootstrap, --control-plane, --infrastructure, --ipam, --extension") + return errors.New("The --contract flag can't be used in combination with --core, --bootstrap, --control-plane, --infrastructure, --ipam, --extension, --addon") } - return c.ApplyUpgrade(client.ApplyUpgradeOptions{ + return c.ApplyUpgrade(ctx, client.ApplyUpgradeOptions{ Kubeconfig: client.Kubeconfig{Path: ua.kubeconfig, Context: ua.kubeconfigContext}, Contract: ua.contract, CoreProvider: ua.coreProvider, @@ -120,6 +127,7 @@ func runUpgradeApply() error { InfrastructureProviders: ua.infrastructureProviders, IPAMProviders: ua.ipamProviders, RuntimeExtensionProviders: ua.runtimeExtensionProviders, + AddonProviders: ua.addonProviders, WaitProviders: ua.waitProviders, WaitProviderTimeout: time.Duration(ua.waitProviderTimeout) * time.Second, }) diff --git a/cmd/clusterctl/cmd/upgrade_plan.go b/cmd/clusterctl/cmd/upgrade_plan.go index 00f711e8a53c..ab1cb989ac4a 100644 --- a/cmd/clusterctl/cmd/upgrade_plan.go +++ b/cmd/clusterctl/cmd/upgrade_plan.go @@ -17,6 +17,7 @@ limitations under the License. package cmd import ( + "context" "fmt" "os" "text/tabwriter" @@ -52,7 +53,7 @@ var upgradePlanCmd = &cobra.Command{ # Gets the recommended target versions for upgrading Cluster API providers. 
clusterctl upgrade plan`), - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(*cobra.Command, []string) error { return runUpgradePlan() }, } @@ -65,12 +66,14 @@ func init() { } func runUpgradePlan() error { - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } - certManUpgradePlan, err := c.PlanCertManagerUpgrade(client.PlanUpgradeOptions{ + certManUpgradePlan, err := c.PlanCertManagerUpgrade(ctx, client.PlanUpgradeOptions{ Kubeconfig: client.Kubeconfig{Path: up.kubeconfig, Context: up.kubeconfigContext}, }) if err != nil { @@ -84,7 +87,7 @@ func runUpgradePlan() error { } } - upgradePlans, err := c.PlanUpgrade(client.PlanUpgradeOptions{ + upgradePlans, err := c.PlanUpgrade(ctx, client.PlanUpgradeOptions{ Kubeconfig: client.Kubeconfig{Path: up.kubeconfig, Context: up.kubeconfigContext}, }) diff --git a/cmd/clusterctl/cmd/util.go b/cmd/clusterctl/cmd/util.go index 2ab45cbb653c..01b65357fd44 100644 --- a/cmd/clusterctl/cmd/util.go +++ b/cmd/clusterctl/cmd/util.go @@ -27,7 +27,7 @@ import ( "github.com/pkg/errors" "github.com/spf13/cobra" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/cluster-api/cmd/clusterctl/client" ) @@ -63,7 +63,7 @@ func printVariablesOutput(template client.Template, options client.GetClusterTem if variableMap[name] != nil { v := *variableMap[name] // Add quotes around any unquoted strings - if len(v) > 0 && !strings.HasPrefix(v, "\"") { + if v != "" && !strings.HasPrefix(v, "\"") { v = fmt.Sprintf("%q", v) variableMap[name] = &v } @@ -74,42 +74,42 @@ func printVariablesOutput(template client.Template, options client.GetClusterTem switch name { case "CLUSTER_NAME": // Cluster name from the cmd arguments is used instead of template default. - variableMap[name] = pointer.String(options.ClusterName) + variableMap[name] = ptr.To(options.ClusterName) case "NAMESPACE": // Namespace name from the cmd flags or from the kubeconfig is used instead of template default. if options.TargetNamespace != "" { - variableMap[name] = pointer.String(options.TargetNamespace) + variableMap[name] = ptr.To(options.TargetNamespace) } else { - variableMap[name] = pointer.String("current Namespace in the KubeConfig file") + variableMap[name] = ptr.To("current Namespace in the KubeConfig file") } case "CONTROL_PLANE_MACHINE_COUNT": // Control plane machine count uses the cmd flag, env variable or a constant is used instead of template default. if options.ControlPlaneMachineCount == nil { if val, ok := os.LookupEnv("CONTROL_PLANE_MACHINE_COUNT"); ok { - variableMap[name] = pointer.String(val) + variableMap[name] = ptr.To(val) } else { - variableMap[name] = pointer.String("1") + variableMap[name] = ptr.To("1") } } else { - variableMap[name] = pointer.String(strconv.FormatInt(*options.ControlPlaneMachineCount, 10)) + variableMap[name] = ptr.To(strconv.FormatInt(*options.ControlPlaneMachineCount, 10)) } case "WORKER_MACHINE_COUNT": // Worker machine count uses the cmd flag, env variable or a constant is used instead of template default. 
if options.WorkerMachineCount == nil { if val, ok := os.LookupEnv("WORKER_MACHINE_COUNT"); ok { - variableMap[name] = pointer.String(val) + variableMap[name] = ptr.To(val) } else { - variableMap[name] = pointer.String("0") + variableMap[name] = ptr.To("0") } } else { - variableMap[name] = pointer.String(strconv.FormatInt(*options.WorkerMachineCount, 10)) + variableMap[name] = ptr.To(strconv.FormatInt(*options.WorkerMachineCount, 10)) } case "KUBERNETES_VERSION": // Kubernetes version uses the cmd flag, env variable, or the template default. if options.KubernetesVersion != "" { - variableMap[name] = pointer.String(options.KubernetesVersion) + variableMap[name] = ptr.To(options.KubernetesVersion) } else if val, ok := os.LookupEnv("KUBERNETES_VERSION"); ok { - variableMap[name] = pointer.String(val) + variableMap[name] = ptr.To(val) } } diff --git a/cmd/clusterctl/cmd/version.go b/cmd/clusterctl/cmd/version.go index 1edf9e4c1805..52635ada126e 100644 --- a/cmd/clusterctl/cmd/version.go +++ b/cmd/clusterctl/cmd/version.go @@ -43,7 +43,7 @@ var versionCmd = &cobra.Command{ GroupID: groupOther, Short: "Print clusterctl version", Args: cobra.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(*cobra.Command, []string) error { return runVersion() }, } diff --git a/cmd/clusterctl/cmd/version_checker.go b/cmd/clusterctl/cmd/version_checker.go index 8aee0ef0607c..7876e4e33a6d 100644 --- a/cmd/clusterctl/cmd/version_checker.go +++ b/cmd/clusterctl/cmd/version_checker.go @@ -20,20 +20,22 @@ import ( "context" "fmt" "os" + "path" "path/filepath" "regexp" "strings" "time" - "github.com/blang/semver" - "github.com/google/go-github/v48/github" + "github.com/adrg/xdg" + "github.com/blang/semver/v4" + "github.com/google/go-github/v53/github" "github.com/pkg/errors" "golang.org/x/oauth2" - "k8s.io/client-go/util/homedir" "sigs.k8s.io/yaml" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/config" logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log" + "sigs.k8s.io/cluster-api/internal/goproxy" "sigs.k8s.io/cluster-api/version" ) @@ -47,28 +49,40 @@ type versionChecker struct { versionFilePath string cliVersion func() version.Info githubClient *github.Client + goproxyClient *goproxy.Client } // newVersionChecker returns a versionChecker. Its behavior has been inspired // by https://github.com/cli/cli. 
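For context on the construction that follows, the GitHub half is the standard go-github plus oauth2 recipe; stripped to its essentials (and assuming only that a token may or may not be present) it looks like this:

package main

import (
	"context"
	"os"

	"github.com/google/go-github/v53/github"
	"golang.org/x/oauth2"
)

// newGitHubClient mirrors the construction in newVersionChecker:
// authenticated when a token is available, anonymous (and therefore
// rate-limited) otherwise.
func newGitHubClient(ctx context.Context, token string) *github.Client {
	if token == "" {
		return github.NewClient(nil)
	}
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})
	return github.NewClient(oauth2.NewClient(ctx, ts))
}

func main() {
	_ = newGitHubClient(context.Background(), os.Getenv("GITHUB_TOKEN"))
}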
-func newVersionChecker(vc config.VariablesClient) *versionChecker { - var client *github.Client +func newVersionChecker(ctx context.Context, vc config.VariablesClient) (*versionChecker, error) { + var githubClient *github.Client token, err := vc.Get("GITHUB_TOKEN") if err == nil { ts := oauth2.StaticTokenSource( &oauth2.Token{AccessToken: token}, ) - tc := oauth2.NewClient(context.TODO(), ts) - client = github.NewClient(tc) + tc := oauth2.NewClient(ctx, ts) + githubClient = github.NewClient(tc) } else { - client = github.NewClient(nil) + githubClient = github.NewClient(nil) + } + + var goproxyClient *goproxy.Client + if scheme, host, err := goproxy.GetSchemeAndHost(os.Getenv("GOPROXY")); err == nil && scheme != "" && host != "" { + goproxyClient = goproxy.NewClient(scheme, host) + } + + configDirectory, err := xdg.ConfigFile(config.ConfigFolderXDG) + if err != nil { + return nil, err } return &versionChecker{ - versionFilePath: filepath.Join(homedir.HomeDir(), config.ConfigFolder, "version.yaml"), + versionFilePath: filepath.Join(configDirectory, "version.yaml"), cliVersion: version.Get, - githubClient: client, - } + githubClient: githubClient, + goproxyClient: goproxyClient, + }, nil } // ReleaseInfo stores information about the release. @@ -87,16 +101,16 @@ type VersionState struct { // latest available release for CAPI // (https://github.com/kubernetes-sigs/cluster-api). It gets the latest // release from github at most once during a 24 hour period and caches the -// state by default in $HOME/.cluster-api/state.yaml. If the clusterctl +// state by default in $XDG_CONFIG_HOME/cluster-api/state.yaml. If the clusterctl // version is the same or greater it returns nothing. -func (v *versionChecker) Check() (string, error) { +func (v *versionChecker) Check(ctx context.Context) (string, error) { log := logf.Log cliVer, err := semver.ParseTolerant(v.cliVersion().GitVersion) if err != nil { return "", errors.Wrap(err, "unable to semver parse clusterctl GitVersion") } - release, err := v.getLatestRelease() + release, err := v.getLatestRelease(ctx) if err != nil { return "", err } @@ -110,14 +124,14 @@ func (v *versionChecker) Check() (string, error) { // if we are using a dirty dev build, just log it out if strings.HasSuffix(cliVer.String(), "-dirty") { - log.V(1).Info("⚠️ Using a development build of clusterctl.", "CLIVersion", cliVer.String(), "LatestGithubRelease", release.Version) + log.V(1).Info("⚠️ Using a development build of clusterctl.", "cliVersion", cliVer.String(), "latestGithubRelease", release.Version) return "", nil } // if the cli version is a dev build off of the latest available release, // the just log it out as informational. if strings.HasPrefix(cliVer.String(), latestVersion.String()) && gitVersionRegEx.MatchString(cliVer.String()) { - log.V(1).Info("⚠️ Using a development build of clusterctl.", "CLIVersion", cliVer.String(), "LatestGithubRelease", release.Version) + log.V(1).Info("⚠️ Using a development build of clusterctl.", "cliVersion", cliVer.String(), "latestGithubRelease", release.Version) return "", nil } @@ -132,30 +146,48 @@ New clusterctl version available: v%s -> v%s return "", nil } -func (v *versionChecker) getLatestRelease() (*ReleaseInfo, error) { +func (v *versionChecker) getLatestRelease(ctx context.Context) (*ReleaseInfo, error) { log := logf.Log + + // Try to get latest clusterctl version number from the local state file. + // NOTE: local state file is ignored if older than 1d. 
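The body below tries three sources in order: the local state file when it is fresh, then the Go module proxy, then GitHub, writing the state back on success so later runs inside the 24 hour window stay offline. The freshness test is presumably along these lines (a sketch, not the actual readStateFile implementation):

package main

import (
	"fmt"
	"time"
)

type versionState struct {
	LastCheck time.Time
}

// fresh reports whether a cached check is still usable; entries older
// than the TTL are treated as absent, so a new lookup is performed.
func fresh(vs versionState, ttl time.Duration) bool {
	return time.Since(vs.LastCheck) < ttl
}

func main() {
	vs := versionState{LastCheck: time.Now().Add(-25 * time.Hour)}
	fmt.Println(fresh(vs, 24*time.Hour)) // false: fall through to goproxy/GitHub
}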
vs, err := readStateFile(v.versionFilePath) if err != nil { return nil, errors.Wrap(err, "unable to read version state file") } + if vs != nil { + return &vs.LatestRelease, nil + } - // if there is no release info in the state file, pull latest release from github - if vs == nil { - release, _, err := v.githubClient.Repositories.GetLatestRelease(context.TODO(), "kubernetes-sigs", "cluster-api") - if err != nil { - log.V(1).Info("⚠️ Unable to get latest github release for clusterctl") - // failing silently here so we don't error out in air-gapped - // environments. - return nil, nil //nolint:nilerr + // Try to get latest clusterctl version number from go modules. + latest, err := v.goproxyGetLatest(ctx) + if err != nil { + log.V(5).Info("error using Goproxy client to get latest versions for clusterctl, falling back to github client") + } + if latest != nil { + vs = &VersionState{ + LastCheck: time.Now(), + LatestRelease: *latest, } - vs = &VersionState{ - LastCheck: time.Now(), - LatestRelease: ReleaseInfo{ - Version: release.GetTagName(), - URL: release.GetHTMLURL(), - }, + if err := writeStateFile(v.versionFilePath, vs); err != nil { + return nil, errors.Wrap(err, "unable to write version state file") } + return &vs.LatestRelease, nil + } + + // Otherwise fall back to get latest clusterctl version number from GitHub. + latest, err = v.gitHubGetLatest(ctx) + if err != nil { + log.V(1).Info("⚠️ Unable to get latest github release for clusterctl") + // failing silently here so we don't error out in air-gapped + // environments. + return nil, nil //nolint:nilerr + } + + vs = &VersionState{ + LastCheck: time.Now(), + LatestRelease: *latest, } if err := writeStateFile(v.versionFilePath, vs); err != nil { @@ -165,6 +197,40 @@ func (v *versionChecker) getLatestRelease() (*ReleaseInfo, error) { return &vs.LatestRelease, nil } +func (v *versionChecker) goproxyGetLatest(ctx context.Context) (*ReleaseInfo, error) { + if v.goproxyClient == nil { + return nil, nil + } + + gomodulePath := path.Join("sigs.k8s.io", "cluster-api") + versions, err := v.goproxyClient.GetVersions(ctx, gomodulePath) + if err != nil { + return nil, err + } + + latest := semver.Version{} + for _, v := range versions { + if v.GT(latest) { + latest = v + } + } + return &ReleaseInfo{ + Version: latest.String(), + URL: gomodulePath, + }, nil +} + +func (v *versionChecker) gitHubGetLatest(ctx context.Context) (*ReleaseInfo, error) { + release, _, err := v.githubClient.Repositories.GetLatestRelease(ctx, "kubernetes-sigs", "cluster-api") + if err != nil { + return nil, err + } + return &ReleaseInfo{ + Version: release.GetTagName(), + URL: release.GetHTMLURL(), + }, nil +} + func writeStateFile(path string, vs *VersionState) error { vsb, err := yaml.Marshal(vs) if err != nil { diff --git a/cmd/clusterctl/cmd/version_checker_test.go b/cmd/clusterctl/cmd/version_checker_test.go index 0ca77cfad89a..a237c232447e 100644 --- a/cmd/clusterctl/cmd/version_checker_test.go +++ b/cmd/clusterctl/cmd/version_checker_test.go @@ -17,6 +17,7 @@ limitations under the License. package cmd import ( + "context" "fmt" "net/http" "os" @@ -24,8 +25,8 @@ import ( "testing" "time" + "github.com/adrg/xdg" . 
"github.com/onsi/gomega" - "k8s.io/client-go/util/homedir" "sigs.k8s.io/yaml" "sigs.k8s.io/cluster-api/cmd/clusterctl/internal/test" @@ -35,9 +36,17 @@ import ( func TestVersionChecker_newVersionChecker(t *testing.T) { g := NewWithT(t) - versionChecker := newVersionChecker(test.NewFakeVariableClient()) + ctx := context.Background() - expectedStateFilePath := filepath.Join(homedir.HomeDir(), ".cluster-api", "version.yaml") + versionChecker, err := newVersionChecker(ctx, test.NewFakeVariableClient()) + + g.Expect(err).ToNot(HaveOccurred()) + + configHome, err := xdg.ConfigFile("cluster-api") + + g.Expect(err).ToNot(HaveOccurred()) + + expectedStateFilePath := filepath.Join(configHome, "version.yaml") g.Expect(versionChecker.versionFilePath).To(Equal(expectedStateFilePath)) g.Expect(versionChecker.cliVersion).ToNot(BeNil()) g.Expect(versionChecker.githubClient).ToNot(BeNil()) @@ -227,6 +236,9 @@ https://github.com/foo/bar/releases/v0.3.8-alpha.1 for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + + ctx := context.Background() + tmpVersionFile, cleanDir := generateTempVersionFilePath(g) defer cleanDir() @@ -240,12 +252,15 @@ https://github.com/foo/bar/releases/v0.3.8-alpha.1 }, ) defer cleanup() - versionChecker := newVersionChecker(test.NewFakeVariableClient()) + versionChecker, err := newVersionChecker(ctx, test.NewFakeVariableClient()) + g.Expect(err).ToNot(HaveOccurred()) + versionChecker.cliVersion = tt.cliVersion versionChecker.githubClient = fakeGithubClient + versionChecker.goproxyClient = nil versionChecker.versionFilePath = tmpVersionFile - output, err := versionChecker.Check() + output, err := versionChecker.Check(ctx) if tt.expectErr { g.Expect(err).To(HaveOccurred()) @@ -259,6 +274,9 @@ https://github.com/foo/bar/releases/v0.3.8-alpha.1 func TestVersionChecker_WriteStateFile(t *testing.T) { g := NewWithT(t) + + ctx := context.Background() + fakeGithubClient, mux, cleanup := test.NewFakeGitHub() mux.HandleFunc( "/repos/kubernetes-sigs/cluster-api/releases/latest", @@ -272,11 +290,12 @@ func TestVersionChecker_WriteStateFile(t *testing.T) { tmpVersionFile, cleanDir := generateTempVersionFilePath(g) defer cleanDir() - versionChecker := newVersionChecker(test.NewFakeVariableClient()) + versionChecker, err := newVersionChecker(ctx, test.NewFakeVariableClient()) + g.Expect(err).ToNot(HaveOccurred()) versionChecker.versionFilePath = tmpVersionFile versionChecker.githubClient = fakeGithubClient - release, err := versionChecker.getLatestRelease() + release, err := versionChecker.getLatestRelease(ctx) g.Expect(err).ToNot(HaveOccurred()) // ensure that the state file has been created @@ -285,12 +304,14 @@ func TestVersionChecker_WriteStateFile(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) var actualVersionState VersionState g.Expect(yaml.Unmarshal(fb, &actualVersionState)).To(Succeed()) - g.Expect(actualVersionState.LatestRelease).To(Equal(*release)) + g.Expect(actualVersionState.LatestRelease).To(BeComparableTo(*release)) } func TestVersionChecker_ReadFromStateFile(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + tmpVersionFile, cleanDir := generateTempVersionFilePath(g) defer cleanDir() @@ -303,13 +324,15 @@ func TestVersionChecker_ReadFromStateFile(t *testing.T) { }, ) defer cleanup1() - versionChecker := newVersionChecker(test.NewFakeVariableClient()) + versionChecker, err := newVersionChecker(ctx, test.NewFakeVariableClient()) + g.Expect(err).ToNot(HaveOccurred()) versionChecker.versionFilePath = tmpVersionFile 
versionChecker.githubClient = fakeGithubClient1 + versionChecker.goproxyClient = nil // this call to getLatestRelease will pull from our fakeGithubClient1 and // store the information including timestamp into the state file. - _, err := versionChecker.getLatestRelease() + _, err = versionChecker.getLatestRelease(ctx) g.Expect(err).ToNot(HaveOccurred()) // override the github client with response to a new version v0.3.99 @@ -317,7 +340,7 @@ func TestVersionChecker_ReadFromStateFile(t *testing.T) { fakeGithubClient2, mux2, cleanup2 := test.NewFakeGitHub() mux2.HandleFunc( "/repos/kubernetes-sigs/cluster-api/releases/latest", - func(w http.ResponseWriter, r *http.Request) { + func(w http.ResponseWriter, _ *http.Request) { githubCalled = true fmt.Fprint(w, `{"tag_name": "v0.3.99", "html_url": "https://github.com/foo/bar/releases/v0.3.99"}`) }, @@ -327,7 +350,7 @@ func TestVersionChecker_ReadFromStateFile(t *testing.T) { // now instead of making another call to github, we want to read from the // file. This will avoid unnecessary calls to github. - release, err := versionChecker.getLatestRelease() + release, err := versionChecker.getLatestRelease(ctx) g.Expect(err).ToNot(HaveOccurred()) g.Expect(release.Version).To(Equal("v0.3.8")) g.Expect(release.URL).To(Equal("https://github.com/foo/bar/releases/v0.3.8")) @@ -337,6 +360,8 @@ func TestVersionChecker_ReadFromStateFile(t *testing.T) { func TestVersionChecker_ReadFromStateFileWithin24Hrs(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + tmpVersionFile, cleanDir := generateTempVersionFilePath(g) defer cleanDir() @@ -359,16 +384,18 @@ func TestVersionChecker_ReadFromStateFileWithin24Hrs(t *testing.T) { }, ) defer cleanup1() - versionChecker := newVersionChecker(test.NewFakeVariableClient()) + versionChecker, err := newVersionChecker(ctx, test.NewFakeVariableClient()) + g.Expect(err).ToNot(HaveOccurred()) versionChecker.versionFilePath = tmpVersionFile versionChecker.githubClient = fakeGithubClient1 + versionChecker.goproxyClient = nil - _, err := versionChecker.getLatestRelease() + _, err = versionChecker.getLatestRelease(ctx) g.Expect(err).ToNot(HaveOccurred()) // Since the state file is more that 24 hours old we want to retrieve the // latest release from github. 
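Setting goproxyClient to nil in these tests is what pins the code path: with no proxy client, getLatestRelease has to fall through to the injected fake GitHub server. The same stubbing idea in miniature with net/http/httptest (handler and payload are illustrative):

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	// A throwaway server standing in for api.github.com; tests point the
	// client's base URL here so no real network traffic happens.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, `{"tag_name": "v0.3.8"}`)
	}))
	defer srv.Close()

	resp, err := http.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // {"tag_name": "v0.3.8"}
}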
- release, err := versionChecker.getLatestRelease() + release, err := versionChecker.getLatestRelease(ctx) g.Expect(err).ToNot(HaveOccurred()) g.Expect(release.Version).To(Equal("v0.3.10")) g.Expect(release.URL).To(Equal("https://github.com/foo/bar/releases/v0.3.10")) @@ -376,7 +403,7 @@ func TestVersionChecker_ReadFromStateFileWithin24Hrs(t *testing.T) { func generateTempVersionFilePath(g *WithT) (string, func()) { dir, err := os.MkdirTemp("", "clusterctl") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) // don't create the state file, just have a path to the file tmpVersionFile := filepath.Join(dir, "clusterctl", "state.yaml") diff --git a/cmd/clusterctl/config/crd/bases/clusterctl.cluster.x-k8s.io_metadata.yaml b/cmd/clusterctl/config/crd/bases/clusterctl.cluster.x-k8s.io_metadata.yaml index ef22193bd469..b1ed1aa1cfa2 100644 --- a/cmd/clusterctl/config/crd/bases/clusterctl.cluster.x-k8s.io_metadata.yaml +++ b/cmd/clusterctl/config/crd/bases/clusterctl.cluster.x-k8s.io_metadata.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.15.0 name: metadata.clusterctl.cluster.x-k8s.io spec: group: clusterctl.cluster.x-k8s.io @@ -21,14 +20,19 @@ spec: description: Metadata for a provider repository. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -38,8 +42,11 @@ spec: with a API Version of Cluster API (contract). properties: contract: - description: "Contract defines the Cluster API contract supported - by this series. \n The value is an API Version, e.g. `v1alpha3`." + description: |- + Contract defines the Cluster API contract supported by this series. + + + The value is an API Version, e.g. `v1alpha3`. 
type: string major: description: Major version of the release series diff --git a/cmd/clusterctl/config/crd/bases/clusterctl.cluster.x-k8s.io_providers.yaml b/cmd/clusterctl/config/crd/bases/clusterctl.cluster.x-k8s.io_providers.yaml index 6a9f581b7330..4d211e3730f2 100644 --- a/cmd/clusterctl/config/crd/bases/clusterctl.cluster.x-k8s.io_providers.yaml +++ b/cmd/clusterctl/config/crd/bases/clusterctl.cluster.x-k8s.io_providers.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.15.0 name: providers.clusterctl.cluster.x-k8s.io spec: group: clusterctl.cluster.x-k8s.io @@ -37,14 +36,19 @@ spec: description: Provider defines an entry in the provider inventory. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -52,18 +56,20 @@ spec: description: ProviderName indicates the name of the provider. type: string type: - description: Type indicates the type of the provider. See ProviderType - for a list of supported values + description: |- + Type indicates the type of the provider. + See ProviderType for a list of supported values type: string version: description: Version indicates the component version. type: string watchedNamespace: - description: "WatchedNamespace indicates the namespace where the provider - controller is watching. If empty the provider controller is watching - for objects in all namespaces. \n Deprecated: in clusterctl v1alpha4 - all the providers watch all the namespaces; this field will be removed - in a future version of this API" + description: |- + WatchedNamespace indicates the namespace where the provider controller is watching. + If empty the provider controller is watching for objects in all namespaces. 
+ + + Deprecated: providers complying with the Cluster API v1alpha4 contract or above must watch all namespaces; this field will be removed in a future version of this API type: string type: object served: true diff --git a/cmd/clusterctl/config/manifest/clusterctl-api.yaml b/cmd/clusterctl/config/manifest/clusterctl-api.yaml index d2564461cb03..a1d5e2719280 100644 --- a/cmd/clusterctl/config/manifest/clusterctl-api.yaml +++ b/cmd/clusterctl/config/manifest/clusterctl-api.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.15.0 name: providers.clusterctl.cluster.x-k8s.io spec: group: clusterctl.cluster.x-k8s.io @@ -36,14 +35,19 @@ spec: description: Provider defines an entry in the provider inventory. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -51,18 +55,20 @@ spec: description: ProviderName indicates the name of the provider. type: string type: - description: Type indicates the type of the provider. See ProviderType - for a list of supported values + description: |- + Type indicates the type of the provider. + See ProviderType for a list of supported values type: string version: description: Version indicates the component version. type: string watchedNamespace: - description: "WatchedNamespace indicates the namespace where the provider - controller is watching. If empty the provider controller is watching - for objects in all namespaces. \n Deprecated: in clusterctl v1alpha4 - all the providers watch all the namespaces; this field will be removed - in a future version of this API" + description: |- + WatchedNamespace indicates the namespace where the provider controller is watching. + If empty the provider controller is watching for objects in all namespaces. 
+ + + Deprecated: providers complying with the Cluster API v1alpha4 contract or above must watch all namespaces; this field will be removed in a future version of this API type: string type: object served: true diff --git a/cmd/clusterctl/hack/create-local-repository.py b/cmd/clusterctl/hack/create-local-repository.py index 79b044a6a26f..35c755fd659f 100755 --- a/cmd/clusterctl/hack/create-local-repository.py +++ b/cmd/clusterctl/hack/create-local-repository.py @@ -40,48 +40,56 @@ from __future__ import unicode_literals +import sys +import errno import json -import subprocess import os +import subprocess +import urllib.request from distutils.dir_util import copy_tree from distutils.file_util import copy_file -import errno -import sys settings = {} providers = { - 'cluster-api': { - 'componentsFile': 'core-components.yaml', - 'nextVersion': 'v1.4.99', - 'type': 'CoreProvider', - }, - 'bootstrap-kubeadm': { - 'componentsFile': 'bootstrap-components.yaml', - 'nextVersion': 'v1.4.99', - 'type': 'BootstrapProvider', - 'configFolder': 'bootstrap/kubeadm/config/default', - }, - 'control-plane-kubeadm': { - 'componentsFile': 'control-plane-components.yaml', - 'nextVersion': 'v1.4.99', - 'type': 'ControlPlaneProvider', - 'configFolder': 'controlplane/kubeadm/config/default', - }, - 'infrastructure-docker': { - 'componentsFile': 'infrastructure-components.yaml', - 'nextVersion': 'v1.4.99', + 'cluster-api': { + 'componentsFile': 'core-components.yaml', + 'nextVersion': 'v1.7.99', + 'type': 'CoreProvider', + }, + 'bootstrap-kubeadm': { + 'componentsFile': 'bootstrap-components.yaml', + 'nextVersion': 'v1.7.99', + 'type': 'BootstrapProvider', + 'configFolder': 'bootstrap/kubeadm/config/default', + }, + 'control-plane-kubeadm': { + 'componentsFile': 'control-plane-components.yaml', + 'nextVersion': 'v1.7.99', + 'type': 'ControlPlaneProvider', + 'configFolder': 'controlplane/kubeadm/config/default', + }, + 'infrastructure-docker': { + 'componentsFile': 'infrastructure-components-development.yaml', + 'nextVersion': 'v1.7.99', + 'type': 'InfrastructureProvider', + 'configFolder': 'test/infrastructure/docker/config/default', + }, + 'infrastructure-in-memory': { + 'componentsFile': 'infrastructure-components-in-memory-development.yaml', + 'nextVersion': 'v1.7.99', 'type': 'InfrastructureProvider', - 'configFolder': 'test/infrastructure/docker/config/default', + 'configFolder': 'test/infrastructure/inmemory/config/default', }, 'runtime-extension-test': { - 'componentsFile': 'runtime-extension-components.yaml', - 'nextVersion': 'v1.4.99', - 'type': 'RuntimeExtensionProvider', - 'configFolder': 'test/extension/config/default', - }, + 'componentsFile': 'runtime-extension-components-development.yaml', + 'nextVersion': 'v1.7.99', + 'type': 'RuntimeExtensionProvider', + 'configFolder': 'test/extension/config/default', + }, } + def load_settings(): global settings try: @@ -89,6 +97,7 @@ def load_settings(): except Exception as e: raise Exception('failed to load clusterctl-settings.json: {}'.format(e)) + def load_providers(): provider_repos = settings.get('provider_repos', []) for repo in provider_repos: @@ -102,26 +111,31 @@ def load_providers(): except Exception as e: raise Exception('failed to load clusterctl-settings.json from repo {}: {}'.format(repo, e)) + def execCmd(args): try: out = subprocess.Popen(args, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) stdout, stderr = out.communicate() if stderr is not None: raise Exception('stderr contains: 
\n{}'.format(stderr)) return stdout - except Exception as e: + except Exception as e: raise Exception('failed to run {}: {}'.format(args, e)) -def get_home(): - return os.path.expanduser('~') def get_repository_folder(): - home = get_home() - return os.path.join(home, '.cluster-api', 'dev-repository') + config_dir = os.getenv("XDG_CONFIG_HOME", "") + if config_dir == "": + home_dir = os.getenv("HOME", "") + if home_dir == "": + raise Exception('HOME variable is not set') + config_dir = os.path.join(home_dir, ".config") + return os.path.join(config_dir, 'cluster-api', 'dev-repository') + def write_local_repository(provider, version, components_file, components_yaml, metadata_file): try: @@ -142,41 +156,60 @@ def write_local_repository(provider, version, components_file, components_yaml, if provider == "infrastructure-docker": copy_tree("test/infrastructure/docker/templates", provider_folder) + if provider == "infrastructure-in-memory": + copy_tree("test/infrastructure/inmemory/templates", provider_folder) + return components_path except Exception as e: raise Exception('failed to write {} to {}: {}'.format(components_file, provider_folder, e)) + def create_local_repositories(): providerList = settings.get('providers', []) assert providerList is not None, 'invalid configuration: please define the list of providers to override' assert len(providerList)>0, 'invalid configuration: please define at least one provider to override' + if len(sys.argv) == 1: + execCmd(['make', 'kustomize']) + for provider in providerList: p = providers.get(provider) - assert p is not None, 'invalid configuration: please specify the configuration for the {} provider'.format(provider) + assert p is not None, 'invalid configuration: please specify the configuration for the {} provider'.format( + provider) repo = p.get('repo', '.') config_folder = p.get('configFolder', 'config/default') - metadata_file = repo+'/metadata.yaml' + metadata_file = repo + '/metadata.yaml' next_version = p.get('nextVersion') - assert next_version is not None, 'invalid configuration for provider {}: please provide nextVersion value'.format(provider) + assert next_version is not None, 'invalid configuration for provider {}: please provide nextVersion value'.format( + provider) name, type = splitNameAndType(provider) - assert name is not None, 'invalid configuration for provider {}: please use a valid provider label'.format(provider) + assert name is not None, 'invalid configuration for provider {}: please use a valid provider label'.format( + provider) components_file = p.get('componentsFile') - assert components_file is not None, 'invalid configuration for provider {}: please provide componentsFile value'.format(provider) + assert components_file is not None, 'invalid configuration for provider {}: please provide componentsFile value'.format( + provider) + + if len(sys.argv) > 1: + url = "{}/{}".format(sys.argv[1], components_file) + components_yaml = urllib.request.urlopen(url).read() + else: + components_yaml = execCmd(['./hack/tools/bin/kustomize', 'build', os.path.join(repo, config_folder)]) - components_yaml = execCmd(['kustomize', 'build', os.path.join(repo, config_folder)]) - components_path = write_local_repository(provider, next_version, components_file, components_yaml, metadata_file) + components_path = write_local_repository(provider, next_version, components_file, components_yaml, + metadata_file) yield name, type, next_version, components_path + def injectLatest(path): head, tail = os.path.split(path) return 
'{}/latest/{}'.format(head, tail) + def create_dev_config(repos): yaml = "providers:\n" for name, type, next_version, components_path in repos: @@ -195,6 +228,7 @@ def create_dev_config(repos): except Exception as e: raise Exception('failed to write {}: {}'.format(config_path, e)) + def splitNameAndType(provider): if provider == 'cluster-api': return 'cluster-api', 'CoreProvider' @@ -208,26 +242,39 @@ def splitNameAndType(provider): return provider[len('ipam-'):], 'IPAMProvider' if provider.startswith('runtime-extension-'): return provider[len('runtime-extension-'):], 'RuntimeExtensionProvider' + if provider.startswith('addon-'): + return provider[len('addon-'):], 'AddonProvider' return None, None + def CoreProviderFlag(): return '--core' + def BootstrapProviderFlag(): return '--bootstrap' + def ControlPlaneProviderFlag(): return '--control-plane' + def InfrastructureProviderFlag(): return '--infrastructure' + def IPAMProviderFlag(): return '--ipam' + def RuntimeExtensionProviderFlag(): return '--runtime-extension' + +def AddonProviderFlag(): + return '--addon' + + def type_to_flag(type): switcher = { 'CoreProvider': CoreProviderFlag, @@ -235,24 +282,34 @@ def type_to_flag(type): 'ControlPlaneProvider': ControlPlaneProviderFlag, 'InfrastructureProvider': InfrastructureProviderFlag, 'IPAMProvider': IPAMProviderFlag, - 'RuntimeExtensionProvider': RuntimeExtensionProviderFlag + 'RuntimeExtensionProvider': RuntimeExtensionProviderFlag, + 'AddonProvider': AddonProviderFlag } func = switcher.get(type, lambda: 'Invalid type') return func() + def print_instructions(repos): providerList = settings.get('providers', []) - print ('clusterctl local overrides generated from local repositories for the {} providers.'.format(', '.join(providerList))) - print ('in order to use them, please run:') + print('clusterctl local overrides generated from local repositories for the {} providers.'.format( + ', '.join(providerList))) + print('in order to use them, please run:') print cmd = "clusterctl init \\\n" for name, type, next_version, components_path in repos: cmd += " {} {}:{} \\\n".format(type_to_flag(type), name, next_version) - cmd += " --config ~/.cluster-api/dev-repository/config.yaml" - print (cmd) + config_dir = os.getenv("XDG_CONFIG_HOME", "") + if config_dir != "": + cmd += " --config $XDG_CONFIG_HOME/cluster-api/dev-repository/config.yaml" + else: + cmd += " --config $HOME/.config/cluster-api/dev-repository/config.yaml" + print(cmd) print if 'infrastructure-docker' in providerList: - print ('please check the documentation for additional steps required for using the docker provider') + print('please check the documentation for additional steps required for using the docker provider') + print + if 'infrastructure-in-memory' in providerList: + print ('please check the documentation for additional steps required for using the in-memory provider') print diff --git a/cmd/clusterctl/internal/test/contracts.go b/cmd/clusterctl/internal/test/contracts.go index 6b032b2c566e..ba8ab44a988b 100644 --- a/cmd/clusterctl/internal/test/contracts.go +++ b/cmd/clusterctl/internal/test/contracts.go @@ -17,12 +17,11 @@ limitations under the License. package test import ( - clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) // PreviousCAPIContractNotSupported define the previous Cluster API contract, not supported by this release of clusterctl. 
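The addon- prefix added to the script's splitNameAndType mirrors the label convention clusterctl itself uses to derive a provider's name and type. A Go rendition of that split, for illustration only:

package main

import (
	"fmt"
	"strings"
)

// splitNameAndType mirrors the script's label convention: the prefix
// selects the provider type and the remainder is the provider name.
func splitNameAndType(label string) (name, kind string) {
	if label == "cluster-api" {
		return "cluster-api", "CoreProvider"
	}
	prefixes := []struct{ prefix, kind string }{
		{"bootstrap-", "BootstrapProvider"},
		{"control-plane-", "ControlPlaneProvider"},
		{"infrastructure-", "InfrastructureProvider"},
		{"ipam-", "IPAMProvider"},
		{"runtime-extension-", "RuntimeExtensionProvider"},
		{"addon-", "AddonProvider"},
	}
	for _, p := range prefixes {
		if strings.HasPrefix(label, p.prefix) {
			return strings.TrimPrefix(label, p.prefix), p.kind
		}
	}
	return "", ""
}

func main() {
	fmt.Println(splitNameAndType("addon-helm")) // helm AddonProvider
}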
-var PreviousCAPIContractNotSupported = clusterv1alpha4.GroupVersion.Version +var PreviousCAPIContractNotSupported = "v1alpha4" // CurrentCAPIContract define the current Cluster API contract. var CurrentCAPIContract = clusterv1.GroupVersion.Version diff --git a/cmd/clusterctl/internal/test/fake_github.go b/cmd/clusterctl/internal/test/fake_github.go index 2666bba2ffac..e9e2e83cbe51 100644 --- a/cmd/clusterctl/internal/test/fake_github.go +++ b/cmd/clusterctl/internal/test/fake_github.go @@ -21,7 +21,7 @@ import ( "net/http/httptest" "net/url" - "github.com/google/go-github/v48/github" + "github.com/google/go-github/v53/github" ) const baseURLPath = "/api-v3" diff --git a/cmd/clusterctl/internal/test/fake_objects.go b/cmd/clusterctl/internal/test/fake_objects.go index 8bb404ce6791..0299d67dd9c9 100644 --- a/cmd/clusterctl/internal/test/fake_objects.go +++ b/cmd/clusterctl/internal/test/fake_objects.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -39,6 +40,7 @@ import ( addonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/internal/test/builder" + "sigs.k8s.io/cluster-api/util" ) type FakeCluster struct { @@ -267,7 +269,7 @@ func (f *FakeCluster) Objs() []client.Object { if f.controlPlane == nil && i == 0 { generateCerts = true } - objs = append(objs, machine.Objs(cluster, generateCerts, nil, nil)...) + objs = append(objs, machine.Objs(cluster, generateCerts, nil, nil, nil)...) } // Ensure all the objects gets UID. @@ -401,14 +403,16 @@ func (f *FakeControlPlane) Objs(cluster *clusterv1.Cluster) []client.Object { // Adds the objects for the machines controlled by the controlPlane for _, machine := range f.machines { - objs = append(objs, machine.Objs(cluster, false, nil, controlPlane)...) + objs = append(objs, machine.Objs(cluster, false, nil, nil, controlPlane)...) } return objs } type FakeMachinePool struct { - name string + name string + bootstrapConfig *clusterv1.Bootstrap + machines []*FakeMachine } // NewFakeMachinePool return a FakeMachinePool that can generate a MachinePool object, all its own ancillary objects: @@ -420,6 +424,16 @@ func NewFakeMachinePool(name string) *FakeMachinePool { } } +func (f *FakeMachinePool) WithStaticBootstrapConfig() *FakeMachinePool { + f.bootstrapConfig = NewStaticBootstrapConfig(f.name) + return f +} + +func (f *FakeMachinePool) WithMachines(fakeMachine ...*FakeMachine) *FakeMachinePool { + f.machines = append(f.machines, fakeMachine...) 
+ return f +} + func (f *FakeMachinePool) Objs(cluster *clusterv1.Cluster) []client.Object { machinePoolInfrastructure := &fakeinfrastructure.GenericInfrastructureMachineTemplate{ TypeMeta: metav1.TypeMeta{ @@ -461,6 +475,11 @@ func (f *FakeMachinePool) Objs(cluster *clusterv1.Cluster) []client.Object { }, } + bootstrapConfig := f.bootstrapConfig + if bootstrapConfig == nil { + bootstrapConfig = NewBootstrapConfigTemplate(machinePoolBootstrap) + } + machinePool := &expv1.MachinePool{ TypeMeta: metav1.TypeMeta{ Kind: "MachinePool", @@ -490,14 +509,7 @@ func (f *FakeMachinePool) Objs(cluster *clusterv1.Cluster) []client.Object { Name: machinePoolInfrastructure.Name, Namespace: machinePoolInfrastructure.Namespace, }, - Bootstrap: clusterv1.Bootstrap{ - ConfigRef: &corev1.ObjectReference{ - APIVersion: machinePoolBootstrap.APIVersion, - Kind: machinePoolBootstrap.Kind, - Name: machinePoolBootstrap.Name, - Namespace: machinePoolBootstrap.Namespace, - }, - }, + Bootstrap: *bootstrapConfig, }, }, ClusterName: cluster.Name, @@ -510,7 +522,15 @@ func (f *FakeMachinePool) Objs(cluster *clusterv1.Cluster) []client.Object { objs := []client.Object{ machinePool, machinePoolInfrastructure, - machinePoolBootstrap, + } + + // if the bootstrapConfig doesn't use a static secret, add the GenericBootstrapConfigTemplate to the object list + if bootstrapConfig.ConfigRef != nil { + objs = append(objs, machinePoolBootstrap) + } + + for _, machine := range f.machines { + objs = append(objs, machine.Objs(cluster, false, nil, machinePool, nil)...) } return objs @@ -530,10 +550,42 @@ func NewFakeInfrastructureTemplate(name string) *fakeinfrastructure.GenericInfra } } +// NewStaticBootstrapConfig return a clusterv1.Bootstrap where +// - the ConfigRef is nil +// - the DataSecretName contains the name of the static data secret. 
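Put differently, a clusterv1.Bootstrap now shows up in these fakes in one of two shapes, and only the ConfigRef shape drags a bootstrap config object into the expected object set. Roughly (object names are illustrative; this assumes the cluster-api module on the import path):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/utils/ptr"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

func main() {
	// Template-driven bootstrap: a ConfigRef pointing at a bootstrap
	// config object, which therefore has to exist among the test objects.
	withRef := clusterv1.Bootstrap{
		ConfigRef: &corev1.ObjectReference{Kind: "GenericBootstrapConfig", Name: "md-0"},
	}

	// Static bootstrap: only a pre-created data secret, no config object.
	static := clusterv1.Bootstrap{
		DataSecretName: ptr.To("md-0-bootstrap-secret"),
	}

	fmt.Println(withRef.ConfigRef != nil, static.ConfigRef != nil) // true false
}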
+func NewStaticBootstrapConfig(name string) *clusterv1.Bootstrap { + return &clusterv1.Bootstrap{ + DataSecretName: ptr.To(name + "-bootstrap-secret"), + } +} + +func NewBootstrapConfigTemplate(machineBootstrapTemplate *fakebootstrap.GenericBootstrapConfigTemplate) *clusterv1.Bootstrap { + return &clusterv1.Bootstrap{ + ConfigRef: &corev1.ObjectReference{ + APIVersion: machineBootstrapTemplate.APIVersion, + Kind: machineBootstrapTemplate.Kind, + Name: machineBootstrapTemplate.Name, + Namespace: machineBootstrapTemplate.Namespace, + }, + } +} + +func NewBootstrapConfig(machineBootstrap *fakebootstrap.GenericBootstrapConfig) *clusterv1.Bootstrap { + return &clusterv1.Bootstrap{ + ConfigRef: &corev1.ObjectReference{ + APIVersion: machineBootstrap.APIVersion, + Kind: machineBootstrap.Kind, + Name: machineBootstrap.Name, + Namespace: machineBootstrap.Namespace, + }, + } +} + type FakeMachineDeployment struct { name string machineSets []*FakeMachineSet sharedInfrastructureTemplate *fakeinfrastructure.GenericInfrastructureMachineTemplate + bootstrapConfig *clusterv1.Bootstrap } // NewFakeMachineDeployment return a FakeMachineDeployment that can generate a MachineDeployment object, all its own ancillary objects: @@ -551,6 +603,11 @@ func (f *FakeMachineDeployment) WithMachineSets(fakeMachineSet ...*FakeMachineSe return f } +func (f *FakeMachineDeployment) WithStaticBootstrapConfig() *FakeMachineDeployment { + f.bootstrapConfig = NewStaticBootstrapConfig(f.name) + return f +} + func (f *FakeMachineDeployment) WithInfrastructureTemplate(infrastructureTemplate *fakeinfrastructure.GenericInfrastructureMachineTemplate) *FakeMachineDeployment { f.sharedInfrastructureTemplate = infrastructureTemplate return f @@ -563,14 +620,14 @@ func (f *FakeMachineDeployment) Objs(cluster *clusterv1.Cluster) []client.Object machineDeploymentInfrastructure = NewFakeInfrastructureTemplate(f.name) } machineDeploymentInfrastructure.Namespace = cluster.Namespace - machineDeploymentInfrastructure.OwnerReferences = append(machineDeploymentInfrastructure.OwnerReferences, // Added by the machine set controller -- RECONCILED + machineDeploymentInfrastructure.SetOwnerReferences(util.EnsureOwnerRef(machineDeploymentInfrastructure.GetOwnerReferences(), // Added by the machine set controller -- RECONCILED metav1.OwnerReference{ APIVersion: clusterv1.GroupVersion.String(), Kind: "Cluster", Name: cluster.Name, UID: cluster.UID, }, - ) + )) setUID(machineDeploymentInfrastructure) machineDeploymentBootstrap := &fakebootstrap.GenericBootstrapConfigTemplate{ @@ -593,6 +650,11 @@ func (f *FakeMachineDeployment) Objs(cluster *clusterv1.Cluster) []client.Object }, } + bootstrapConfig := f.bootstrapConfig + if bootstrapConfig == nil { + bootstrapConfig = NewBootstrapConfigTemplate(machineDeploymentBootstrap) + } + machineDeployment := &clusterv1.MachineDeployment{ TypeMeta: metav1.TypeMeta{ Kind: "MachineDeployment", @@ -622,14 +684,7 @@ func (f *FakeMachineDeployment) Objs(cluster *clusterv1.Cluster) []client.Object Name: machineDeploymentInfrastructure.Name, Namespace: machineDeploymentInfrastructure.Namespace, }, - Bootstrap: clusterv1.Bootstrap{ - ConfigRef: &corev1.ObjectReference{ - APIVersion: machineDeploymentBootstrap.APIVersion, - Kind: machineDeploymentBootstrap.Kind, - Name: machineDeploymentBootstrap.Name, - Namespace: machineDeploymentBootstrap.Namespace, - }, - }, + Bootstrap: *bootstrapConfig, }, }, ClusterName: cluster.Name, @@ -641,8 +696,13 @@ func (f *FakeMachineDeployment) Objs(cluster *clusterv1.Cluster) []client.Object 
objs := []client.Object{ machineDeployment, - machineDeploymentBootstrap, } + + // if the bootstrapConfig doesn't use a static secret, add the GenericBootstrapConfigTemplate to the object list + if bootstrapConfig.ConfigRef != nil { + objs = append(objs, machineDeploymentBootstrap) + } + // if the infra template is specific to the machine deployment, add it to the object list if f.sharedInfrastructureTemplate == nil { objs = append(objs, machineDeploymentInfrastructure) @@ -660,6 +720,7 @@ type FakeMachineSet struct { name string machines []*FakeMachine sharedInfrastructureTemplate *fakeinfrastructure.GenericInfrastructureMachineTemplate + bootstrapConfig *clusterv1.Bootstrap } // NewFakeMachineSet return a FakeMachineSet that can generate a MachineSet object, all its own ancillary objects: @@ -677,6 +738,11 @@ func (f *FakeMachineSet) WithMachines(fakeMachine ...*FakeMachine) *FakeMachineS return f } +func (f *FakeMachineSet) WithStaticBootstrapConfig() *FakeMachineSet { + f.bootstrapConfig = NewStaticBootstrapConfig(f.name) + return f +} + func (f *FakeMachineSet) WithInfrastructureTemplate(infrastructureTemplate *fakeinfrastructure.GenericInfrastructureMachineTemplate) *FakeMachineSet { f.sharedInfrastructureTemplate = infrastructureTemplate return f @@ -749,6 +815,8 @@ func (f *FakeMachineSet) Objs(cluster *clusterv1.Cluster, machineDeployment *clu Namespace: machineSetInfrastructure.Namespace, } + objs = append(objs, machineSet) + machineSetBootstrap := &fakebootstrap.GenericBootstrapConfigTemplate{ TypeMeta: metav1.TypeMeta{ APIVersion: fakebootstrap.GroupVersion.String(), @@ -769,16 +837,18 @@ func (f *FakeMachineSet) Objs(cluster *clusterv1.Cluster, machineDeployment *clu }, } - machineSet.Spec.Template.Spec.Bootstrap = clusterv1.Bootstrap{ - ConfigRef: &corev1.ObjectReference{ - APIVersion: machineSetBootstrap.APIVersion, - Kind: machineSetBootstrap.Kind, - Name: machineSetBootstrap.Name, - Namespace: machineSetBootstrap.Namespace, - }, + bootstrapConfig := f.bootstrapConfig + if bootstrapConfig == nil { + bootstrapConfig = NewBootstrapConfigTemplate(machineSetBootstrap) + } + + machineSet.Spec.Template.Spec.Bootstrap = *bootstrapConfig + + // if the bootstrapConfig doesn't use a static secret, add the GenericBootstrapConfigTemplate to the object list + if bootstrapConfig.ConfigRef != nil { + objs = append(objs, machineSetBootstrap) } - objs = append(objs, machineSet, machineSetBootstrap) // if the infra template is specific to the machine set, add it to the object list if f.sharedInfrastructureTemplate == nil { objs = append(objs, machineSetInfrastructure) @@ -787,14 +857,15 @@ func (f *FakeMachineSet) Objs(cluster *clusterv1.Cluster, machineDeployment *clu // Adds the objects for the machines controlled by the machineSet for _, machine := range f.machines { - objs = append(objs, machine.Objs(cluster, false, machineSet, nil)...) + objs = append(objs, machine.Objs(cluster, false, machineSet, nil, nil)...) 
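All of these fakes share the same fluent-builder shape: each With* option mutates the receiver and returns it, so fixtures read as one chained expression. The skeleton of the pattern, reduced to a toy type:

package main

import "fmt"

type fakeMachineSetSketch struct {
	name     string
	machines []string
	static   bool
}

func newFake(name string) *fakeMachineSetSketch { return &fakeMachineSetSketch{name: name} }

// Each option returns the receiver so calls chain fluently.
func (f *fakeMachineSetSketch) WithMachines(names ...string) *fakeMachineSetSketch {
	f.machines = append(f.machines, names...)
	return f
}

func (f *fakeMachineSetSketch) WithStaticBootstrapConfig() *fakeMachineSetSketch {
	f.static = true
	return f
}

func main() {
	ms := newFake("ms1").WithMachines("m1", "m2").WithStaticBootstrapConfig()
	fmt.Println(ms.name, len(ms.machines), ms.static) // ms1 2 true
}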
} return objs } type FakeMachine struct { - name string + name string + bootstrapConfig *clusterv1.Bootstrap } // NewFakeMachine return a FakeMachine that can generate a Machine object, all its own ancillary objects: @@ -807,7 +878,12 @@ func NewFakeMachine(name string) *FakeMachine { } } -func (f *FakeMachine) Objs(cluster *clusterv1.Cluster, generateCerts bool, machineSet *clusterv1.MachineSet, controlPlane *fakecontrolplane.GenericControlPlane) []client.Object { +func (f *FakeMachine) WithStaticBootstrapConfig() *FakeMachine { + f.bootstrapConfig = NewStaticBootstrapConfig(f.name) + return f +} + +func (f *FakeMachine) Objs(cluster *clusterv1.Cluster, generateCerts bool, machineSet *clusterv1.MachineSet, machinePool *expv1.MachinePool, controlPlane *fakecontrolplane.GenericControlPlane) []client.Object { machineInfrastructure := &fakeinfrastructure.GenericInfrastructureMachine{ TypeMeta: metav1.TypeMeta{ APIVersion: fakeinfrastructure.GroupVersion.String(), @@ -839,6 +915,12 @@ func (f *FakeMachine) Objs(cluster *clusterv1.Cluster, generateCerts bool, machi }, } + bootstrapConfig := f.bootstrapConfig + if bootstrapConfig == nil { + bootstrapConfig = NewBootstrapConfig(machineBootstrap) + bootstrapConfig.DataSecretName = &bootstrapDataSecretName + } + // Ensure the machineBootstrap gets a UID to be used by dependant objects for creating OwnerReferences. setUID(machineBootstrap) @@ -879,15 +961,6 @@ func (f *FakeMachine) Objs(cluster *clusterv1.Cluster, generateCerts bool, machi Name: machineInfrastructure.Name, Namespace: cluster.Namespace, }, - Bootstrap: clusterv1.Bootstrap{ - ConfigRef: &corev1.ObjectReference{ - APIVersion: machineBootstrap.APIVersion, - Kind: machineBootstrap.Kind, - Name: machineBootstrap.Name, - Namespace: cluster.Namespace, - }, - DataSecretName: &bootstrapDataSecretName, - }, ClusterName: cluster.Name, }, } @@ -906,6 +979,11 @@ func (f *FakeMachine) Objs(cluster *clusterv1.Cluster, generateCerts bool, machi machine.SetOwnerReferences([]metav1.OwnerReference{*metav1.NewControllerRef(controlPlane, controlPlane.GroupVersionKind())}) // Sets the MachineControlPlane Label machine.Labels[clusterv1.MachineControlPlaneLabel] = "" + case machinePool != nil: + // If this machine belong to a machinePool, it is controlled by it / ownership set by the machinePool controller -- ** NOT RECONCILED ** + machine.SetOwnerReferences([]metav1.OwnerReference{*metav1.NewControllerRef(machinePool, machinePool.GroupVersionKind())}) + // Sets the MachinePoolNameLabel + machine.Labels[clusterv1.MachinePoolNameLabel] = machinePool.Name default: // If this machine does not belong to a machineSet or to a control plane, it is owned by the cluster / ownership set by the machine controller -- RECONCILED machine.SetOwnerReferences([]metav1.OwnerReference{{ @@ -947,23 +1025,28 @@ func (f *FakeMachine) Objs(cluster *clusterv1.Cluster, generateCerts bool, machi clusterv1.ClusterNameLabel: machine.Spec.ClusterName, }) - machineBootstrap.SetOwnerReferences([]metav1.OwnerReference{ - { - APIVersion: machine.APIVersion, - Kind: machine.Kind, - Name: machine.Name, - UID: machine.UID, - }, - }) - machineBootstrap.SetLabels(map[string]string{ - clusterv1.ClusterNameLabel: machine.Spec.ClusterName, - }) - objs := []client.Object{ machine, machineInfrastructure, - machineBootstrap, - bootstrapDataSecret, + } + + if machinePool == nil { + machine.Spec.Bootstrap = *bootstrapConfig + if machine.Spec.Bootstrap.ConfigRef != nil { + machineBootstrap.SetOwnerReferences([]metav1.OwnerReference{ + { + APIVersion: 
machine.APIVersion, + Kind: machine.Kind, + Name: machine.Name, + UID: machine.UID, + }, + }) + machineBootstrap.SetLabels(map[string]string{ + clusterv1.ClusterNameLabel: machine.Spec.ClusterName, + }) + + objs = append(objs, bootstrapDataSecret, machineBootstrap) + } } objs = append(objs, additionalObjs...) @@ -1099,6 +1182,7 @@ func (f *FakeClusterResourceSet) Objs() []client.Object { Namespace: cluster.Namespace, }, Spec: addonsv1.ClusterResourceSetBindingSpec{ + ClusterName: cluster.Name, Bindings: []*addonsv1.ResourceSetBinding{ { ClusterResourceSetName: crs.Name, @@ -1119,14 +1203,6 @@ func (f *FakeClusterResourceSet) Objs() []client.Object { objs = append(objs, binding) - // binding are owned by the Cluster / ownership set by the ClusterResourceSet controller - binding.SetOwnerReferences(append(binding.OwnerReferences, metav1.OwnerReference{ - APIVersion: cluster.APIVersion, - Kind: cluster.Kind, - Name: cluster.Name, - UID: cluster.UID, - })) - resourceSetBinding := addonsv1.ResourceSetBinding{ ClusterResourceSetName: crs.Name, Resources: []addonsv1.ResourceBinding{}, diff --git a/cmd/clusterctl/internal/test/fake_processor.go b/cmd/clusterctl/internal/test/fake_processor.go index 92a5dd229949..5b50b8aa0d48 100644 --- a/cmd/clusterctl/internal/test/fake_processor.go +++ b/cmd/clusterctl/internal/test/fake_processor.go @@ -16,7 +16,9 @@ limitations under the License. package test -import "fmt" +import ( + "fmt" +) type FakeProcessor struct { errGetVariables error diff --git a/cmd/clusterctl/internal/test/fake_proxy.go b/cmd/clusterctl/internal/test/fake_proxy.go index f1524f477c8c..3e421fa0e619 100644 --- a/cmd/clusterctl/internal/test/fake_proxy.go +++ b/cmd/clusterctl/internal/test/fake_proxy.go @@ -17,6 +17,7 @@ limitations under the License. package test import ( + "context" "errors" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" @@ -25,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -78,7 +79,7 @@ func (f *FakeProxy) GetConfig() (*rest.Config, error) { return nil, nil } -func (f *FakeProxy) NewClient() (client.Client, error) { +func (f *FakeProxy) NewClient(_ context.Context) (client.Client, error) { if f.cs != nil { return f.cs, nil } @@ -86,7 +87,7 @@ func (f *FakeProxy) NewClient() (client.Client, error) { return f.cs, nil } -func (f *FakeProxy) CheckClusterAvailable() error { +func (f *FakeProxy) CheckClusterAvailable(_ context.Context) error { // default to considering the cluster as available unless explicitly set to be // unavailable. if f.available == nil || *f.available { @@ -96,7 +97,7 @@ func (f *FakeProxy) CheckClusterAvailable() error { } // ListResources returns all the resources known by the FakeProxy. 
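The signature changes below are the fake's side of a wider interface change: every proxy method that can touch the cluster now accepts a context.Context, and implementations that do not need it take a blank parameter to keep linters quiet while still satisfying the interface. In outline:

package main

import (
	"context"
	"fmt"
)

// proxy is a trimmed stand-in for the clusterctl proxy interface after
// the change: methods that may touch the network take a context.
type proxy interface {
	CheckClusterAvailable(ctx context.Context) error
}

type fakeProxy struct{ available bool }

// The fake ignores the context, hence the blank parameter name.
func (f *fakeProxy) CheckClusterAvailable(_ context.Context) error {
	if f.available {
		return nil
	}
	return fmt.Errorf("cluster is not available")
}

func main() {
	var p proxy = &fakeProxy{available: true}
	fmt.Println(p.CheckClusterAvailable(context.Background())) // <nil>
}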
-func (f *FakeProxy) ListResources(labels map[string]string, namespaces ...string) ([]unstructured.Unstructured, error) { +func (f *FakeProxy) ListResources(_ context.Context, labels map[string]string, namespaces ...string) ([]unstructured.Unstructured, error) { var ret []unstructured.Unstructured //nolint:prealloc for _, o := range f.objs { u := unstructured.Unstructured{} @@ -142,7 +143,7 @@ func (f *FakeProxy) GetContexts(_ string) ([]string, error) { return nil, nil } -func (f *FakeProxy) GetResourceNames(_, _ string, _ []client.ListOption, _ string) ([]string, error) { +func (f *FakeProxy) GetResourceNames(_ context.Context, _, _ string, _ []client.ListOption, _ string) ([]string, error) { return nil, nil } @@ -202,7 +203,7 @@ func (f *FakeProxy) WithFakeCAPISetup() *FakeProxy { } func (f *FakeProxy) WithClusterAvailable(available bool) *FakeProxy { - f.available = pointer.Bool(available) + f.available = ptr.To(available) return f } diff --git a/cmd/clusterctl/internal/test/fake_reader.go b/cmd/clusterctl/internal/test/fake_reader.go index 3fd8ea4c9f05..d34fa6821fc2 100644 --- a/cmd/clusterctl/internal/test/fake_reader.go +++ b/cmd/clusterctl/internal/test/fake_reader.go @@ -17,6 +17,8 @@ limitations under the License. package test import ( + "context" + "github.com/pkg/errors" "sigs.k8s.io/yaml" @@ -55,7 +57,7 @@ type imageMeta struct { Tag string `json:"tag,omitempty"` } -func (f *FakeReader) Init(_ string) error { +func (f *FakeReader) Init(_ context.Context, _ string) error { f.initialized = true return nil } diff --git a/cmd/clusterctl/internal/test/providers/bootstrap/zz_generated.deepcopy.go b/cmd/clusterctl/internal/test/providers/bootstrap/zz_generated.deepcopy.go index 32f72b4d8136..e7f3b2ac19fe 100644 --- a/cmd/clusterctl/internal/test/providers/bootstrap/zz_generated.deepcopy.go +++ b/cmd/clusterctl/internal/test/providers/bootstrap/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* Copyright The Kubernetes Authors. diff --git a/cmd/clusterctl/internal/test/providers/controlplane/generic_types.go b/cmd/clusterctl/internal/test/providers/controlplane/generic_types.go index 921510065dde..b4a130942c87 100644 --- a/cmd/clusterctl/internal/test/providers/controlplane/generic_types.go +++ b/cmd/clusterctl/internal/test/providers/controlplane/generic_types.go @@ -50,7 +50,5 @@ type GenericControlPlaneList struct { } func init() { - SchemeBuilder.Register( - &GenericControlPlane{}, &GenericControlPlaneList{}, - ) + objectTypes = append(objectTypes, &GenericControlPlane{}, &GenericControlPlaneList{}) } diff --git a/cmd/clusterctl/internal/test/providers/controlplane/groupversion_info.go b/cmd/clusterctl/internal/test/providers/controlplane/groupversion_info.go index 5586f446ad1a..0c2e52bb8893 100644 --- a/cmd/clusterctl/internal/test/providers/controlplane/groupversion_info.go +++ b/cmd/clusterctl/internal/test/providers/controlplane/groupversion_info.go @@ -20,17 +20,25 @@ limitations under the License. package controlplane import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" ) var ( // GroupVersion is group version used to register these objects. GroupVersion = schema.GroupVersion{Group: "controlplane.cluster.x-k8s.io", Version: "v1beta1"} - // SchemeBuilder is used to add go types to the GroupVersionKind scheme. 
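The rework in this hunk replaces controller-runtime's scheme.Builder with the plain apimachinery pattern: each *_types.go file appends its kinds to a package-level objectTypes slice from init, and a single addKnownTypes registers them together with the metav1 kinds. Reassembled outside the diff for readability:

package controlplane

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

var (
	// GroupVersion identifies the API group/version being registered.
	GroupVersion = schema.GroupVersion{Group: "controlplane.cluster.x-k8s.io", Version: "v1beta1"}

	schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = schemeBuilder.AddToScheme

	// objectTypes is filled by init() functions in the *_types.go files.
	objectTypes = []runtime.Object{}
)

func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(GroupVersion, objectTypes...)
	metav1.AddToGroupVersion(scheme, GroupVersion)
	return nil
}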
- SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) // AddToScheme adds the types in this group-version to the given scheme. - AddToScheme = SchemeBuilder.AddToScheme + AddToScheme = schemeBuilder.AddToScheme + + objectTypes = []runtime.Object{} ) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, objectTypes...) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/cmd/clusterctl/internal/test/providers/controlplane/zz_generated.deepcopy.go b/cmd/clusterctl/internal/test/providers/controlplane/zz_generated.deepcopy.go index 84570bb55ebe..aacc2ef45d3d 100644 --- a/cmd/clusterctl/internal/test/providers/controlplane/zz_generated.deepcopy.go +++ b/cmd/clusterctl/internal/test/providers/controlplane/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* Copyright The Kubernetes Authors. @@ -22,7 +21,7 @@ limitations under the License. package controlplane import ( - runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. diff --git a/cmd/clusterctl/internal/test/providers/external/zz_generated.deepcopy.go b/cmd/clusterctl/internal/test/providers/external/zz_generated.deepcopy.go index 1b8de12c5265..4d752f377a17 100644 --- a/cmd/clusterctl/internal/test/providers/external/zz_generated.deepcopy.go +++ b/cmd/clusterctl/internal/test/providers/external/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* Copyright The Kubernetes Authors. diff --git a/cmd/clusterctl/internal/test/providers/infrastructure/zz_generated.deepcopy.go b/cmd/clusterctl/internal/test/providers/infrastructure/zz_generated.deepcopy.go index 0dc8a06d9163..d568d17bfe7e 100644 --- a/cmd/clusterctl/internal/test/providers/infrastructure/zz_generated.deepcopy.go +++ b/cmd/clusterctl/internal/test/providers/infrastructure/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* Copyright The Kubernetes Authors. 
diff --git a/cmd/clusterctl/internal/util/obj_refs_test.go b/cmd/clusterctl/internal/util/obj_refs_test.go
index 860cd6b5ce0b..4459fe525f0e 100644
--- a/cmd/clusterctl/internal/util/obj_refs_test.go
+++ b/cmd/clusterctl/internal/util/obj_refs_test.go
@@ -85,7 +85,7 @@ func TestGetObjectReferences(t *testing.T) {
 				g.Expect(err).To(HaveOccurred())
 				return
 			}
-			g.Expect(err).NotTo(HaveOccurred())
+			g.Expect(err).ToNot(HaveOccurred())
 			g.Expect(got).To(HaveLen(len(tt.want)))
 			for i := range got {
 				g.Expect(got[i].Kind).To(Equal(tt.want[i].Kind))
diff --git a/cmd/clusterctl/internal/util/objs_test.go b/cmd/clusterctl/internal/util/objs_test.go
index 783133bdda1b..829d4bf26ba8 100644
--- a/cmd/clusterctl/internal/util/objs_test.go
+++ b/cmd/clusterctl/internal/util/objs_test.go
@@ -169,7 +169,7 @@ func Test_inspectImages(t *testing.T) {
 				return
 			}
 
-			g.Expect(err).NotTo(HaveOccurred())
+			g.Expect(err).ToNot(HaveOccurred())
 			g.Expect(got).To(Equal(tt.want))
 		})
 	}
@@ -265,10 +265,10 @@ func TestFixImages(t *testing.T) {
 				return
 			}
 
-			g.Expect(err).NotTo(HaveOccurred())
+			g.Expect(err).ToNot(HaveOccurred())
 			gotImages, err := InspectImages(got)
-			g.Expect(err).NotTo(HaveOccurred())
+			g.Expect(err).ToNot(HaveOccurred())
 			g.Expect(gotImages).To(Equal(tt.want))
 		})
 	}
diff --git a/cmd/clusterctl/log/logger.go b/cmd/clusterctl/log/logger.go
index a64e2bb657cd..34435cdea22e 100644
--- a/cmd/clusterctl/log/logger.go
+++ b/cmd/clusterctl/log/logger.go
@@ -106,7 +106,7 @@ func (l *logger) V(level int) logr.LogSink {
 // WithName adds a new element to the logger's name.
 func (l *logger) WithName(name string) logr.LogSink {
 	nl := l.clone()
-	if len(l.prefix) > 0 {
+	if l.prefix != "" {
 		nl.prefix = l.prefix + "/"
 	}
 	nl.prefix += name
diff --git a/cmd/clusterctl/log/logger_test.go b/cmd/clusterctl/log/logger_test.go
index d1b45dc37d5f..d1d0b31de162 100644
--- a/cmd/clusterctl/log/logger_test.go
+++ b/cmd/clusterctl/log/logger_test.go
@@ -21,7 +21,7 @@ import (
 	. "github.com/onsi/gomega"
 	"github.com/pkg/errors"
 
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 )
 
 func TestFlatten(t *testing.T) {
@@ -100,7 +100,7 @@
 				Level:  0,
 				Values: tt.args.kvList,
 			})
-			g.Expect(err).NotTo(HaveOccurred())
+			g.Expect(err).ToNot(HaveOccurred())
 			g.Expect(got).To(Equal(tt.want))
 		})
 	}
@@ -115,7 +115,7 @@
 	}{
 		{
 			name:      "Return true when level is set below the threshold",
-			threshold: pointer.Int(5),
+			threshold: ptr.To(5),
 			level:     1,
 			want:      true,
 		},
 		{
@@ -126,7 +126,7 @@
 		},
 		{
 			name:      "Return false when level is set above the threshold",
-			threshold: pointer.Int(5),
+			threshold: ptr.To(5),
 			level:     7,
 			want:      false,
 		},
diff --git a/config/certmanager/certificate.yaml b/config/certmanager/certificate.yaml
index b1ae09af576d..f71fc097e973 100644
--- a/config/certmanager/certificate.yaml
+++ b/config/certmanager/certificate.yaml
@@ -14,14 +14,14 @@ metadata:
   name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml
   namespace: system
 spec:
-  # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize
+  # SERVICE_NAME and SERVICE_NAMESPACE will be substituted by kustomize
   dnsNames:
-  - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc
-  - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local
+  - SERVICE_NAME.SERVICE_NAMESPACE.svc
+  - SERVICE_NAME.SERVICE_NAMESPACE.svc.cluster.local
   issuerRef:
     kind: Issuer
     name: selfsigned-issuer
-  secretName: $(SERVICE_NAME)-cert # this secret will not be prefixed, since it's not managed by kustomize
+  secretName: capi-webhook-service-cert # this secret will not be prefixed, since it's not managed by kustomize
   subject:
     organizations:
-    - k8s-sig-cluster-lifecycle
\ No newline at end of file
+    - k8s-sig-cluster-lifecycle
diff --git a/config/certmanager/kustomizeconfig.yaml b/config/certmanager/kustomizeconfig.yaml
index 28a895a404a9..2b4342655949 100644
--- a/config/certmanager/kustomizeconfig.yaml
+++ b/config/certmanager/kustomizeconfig.yaml
@@ -7,13 +7,3 @@ nameReference:
     group: cert-manager.io
     path: spec/issuerRef/name
 
-varReference:
-- kind: Certificate
-  group: cert-manager.io
-  path: spec/commonName
-- kind: Certificate
-  group: cert-manager.io
-  path: spec/dnsNames
-- kind: Certificate
-  group: cert-manager.io
-  path: spec/secretName
diff --git a/config/crd/bases/addons.cluster.x-k8s.io_clusterresourcesetbindings.yaml b/config/crd/bases/addons.cluster.x-k8s.io_clusterresourcesetbindings.yaml
index edf31d9af124..d1328c575fe5 100644
--- a/config/crd/bases/addons.cluster.x-k8s.io_clusterresourcesetbindings.yaml
+++ b/config/crd/bases/addons.cluster.x-k8s.io_clusterresourcesetbindings.yaml
@@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.11.3
-    creationTimestamp: null
+    controller-gen.kubebuilder.io/version: v0.15.0
   name: clusterresourcesetbindings.addons.cluster.x-k8s.io
 spec:
   group: addons.cluster.x-k8s.io
@@ -17,22 +16,30 @@ spec:
     singular: clusterresourcesetbinding
   scope: Namespaced
   versions:
-  - name: v1alpha3
+  - deprecated: true
+    name: v1alpha3
     schema:
       openAPIV3Schema:
-        description: "ClusterResourceSetBinding lists all matching ClusterResourceSets
-          with the cluster it belongs to. \n Deprecated: This type will be removed
-          in one of the next releases."
+        description: |-
+          ClusterResourceSetBinding lists all matching ClusterResourceSets with the cluster it belongs to.
+ + + Deprecated: This type will be removed in one of the next releases. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -63,10 +70,9 @@ spec: to the cluster or not. type: boolean hash: - description: Hash is the hash of a resource's data. This - can be used to decide if a resource is changed. For - "ApplyOnce" ClusterResourceSet.spec.strategy, this is - no-op as that strategy does not act on change. + description: |- + Hash is the hash of a resource's data. This can be used to decide if a resource is changed. + For "ApplyOnce" ClusterResourceSet.spec.strategy, this is no-op as that strategy does not act on change. type: string kind: description: 'Kind of the resource. Supported kinds are: @@ -97,7 +103,7 @@ spec: type: array type: object type: object - served: true + served: false storage: false subresources: status: {} @@ -106,22 +112,30 @@ spec: jsonPath: .metadata.creationTimestamp name: Age type: date + deprecated: true name: v1alpha4 schema: openAPIV3Schema: - description: "ClusterResourceSetBinding lists all matching ClusterResourceSets - with the cluster it belongs to. \n Deprecated: This type will be removed - in one of the next releases." + description: |- + ClusterResourceSetBinding lists all matching ClusterResourceSets with the cluster it belongs to. + + + Deprecated: This type will be removed in one of the next releases. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -152,7 +166,9 @@ spec: to the cluster or not. type: boolean hash: - description: Hash is the hash of a resource's data. This - can be used to decide if a resource is changed. For - "ApplyOnce" ClusterResourceSet.spec.strategy, this is - no-op as that strategy does not act on change. + description: |- + Hash is the hash of a resource's data. This can be used to decide if a resource is changed. + For "ApplyOnce" ClusterResourceSet.spec.strategy, this is no-op as that strategy does not act on change. type: string kind: description: 'Kind of the resource. Supported kinds are: @@ -186,7 +199,7 @@ spec: type: array type: object type: object - served: true + served: false storage: false subresources: status: {} @@ -202,14 +215,19 @@ spec: with the cluster it belongs to. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -240,10 +258,9 @@ spec: to the cluster or not. type: boolean hash: - description: Hash is the hash of a resource's data. This - can be used to decide if a resource is changed. For - "ApplyOnce" ClusterResourceSet.spec.strategy, this is - no-op as that strategy does not act on change. + description: |- + Hash is the hash of a resource's data. This can be used to decide if a resource is changed. + For "ApplyOnce" ClusterResourceSet.spec.strategy, this is no-op as that strategy does not act on change. type: string kind: description: 'Kind of the resource. Supported kinds are: @@ -272,6 +289,11 @@ spec: - clusterResourceSetName type: object type: array + clusterName: + description: |- + ClusterName is the name of the Cluster this binding applies to. + Note: this field is mandatory in v1beta2. 
+ type: string type: object type: object served: true diff --git a/config/crd/bases/addons.cluster.x-k8s.io_clusterresourcesets.yaml b/config/crd/bases/addons.cluster.x-k8s.io_clusterresourcesets.yaml index 84fa6cd6c379..a9d0314ebd68 100644 --- a/config/crd/bases/addons.cluster.x-k8s.io_clusterresourcesets.yaml +++ b/config/crd/bases/addons.cluster.x-k8s.io_clusterresourcesets.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.15.0 name: clusterresourcesets.addons.cluster.x-k8s.io spec: group: addons.cluster.x-k8s.io @@ -17,21 +16,30 @@ spec: singular: clusterresourceset scope: Namespaced versions: - - name: v1alpha3 + - deprecated: true + name: v1alpha3 schema: openAPIV3Schema: - description: "ClusterResourceSet is the Schema for the clusterresourcesets - API. \n Deprecated: This type will be removed in one of the next releases." + description: |- + ClusterResourceSet is the Schema for the clusterresourcesets API. + + + Deprecated: This type will be removed in one of the next releases. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -39,51 +47,54 @@ spec: description: ClusterResourceSetSpec defines the desired state of ClusterResourceSet. properties: clusterSelector: - description: Label selector for Clusters. The Clusters that are selected - by this will be the ones affected by this ClusterResourceSet. It - must match the Cluster labels. This field is immutable. + description: |- + Label selector for Clusters. The Clusters that are + selected by this will be the ones affected by this ClusterResourceSet. + It must match the Cluster labels. This field is immutable. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic resources: description: Resources is a list of Secrets/ConfigMaps where each contains 1 or more resources to be applied to remote clusters. @@ -126,37 +137,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. 
+ The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - status @@ -170,7 +181,7 @@ spec: type: integer type: object type: object - served: true + served: false storage: false subresources: status: {} @@ -179,21 +190,30 @@ spec: jsonPath: .metadata.creationTimestamp name: Age type: date + deprecated: true name: v1alpha4 schema: openAPIV3Schema: - description: "ClusterResourceSet is the Schema for the clusterresourcesets - API. \n Deprecated: This type will be removed in one of the next releases." + description: |- + ClusterResourceSet is the Schema for the clusterresourcesets API. + + + Deprecated: This type will be removed in one of the next releases. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -201,52 +221,55 @@ spec: description: ClusterResourceSetSpec defines the desired state of ClusterResourceSet. properties: clusterSelector: - description: Label selector for Clusters. The Clusters that are selected - by this will be the ones affected by this ClusterResourceSet. It - must match the Cluster labels. This field is immutable. Label selector - cannot be empty. + description: |- + Label selector for Clusters. The Clusters that are + selected by this will be the ones affected by this ClusterResourceSet. + It must match the Cluster labels. This field is immutable. + Label selector cannot be empty. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic resources: description: Resources is a list of Secrets/ConfigMaps where each contains 1 or more resources to be applied to remote clusters. @@ -289,37 +312,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. 
The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - status @@ -333,7 +356,7 @@ spec: type: integer type: object type: object - served: true + served: false storage: false subresources: status: {} @@ -349,14 +372,19 @@ spec: API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -364,52 +392,55 @@ spec: description: ClusterResourceSetSpec defines the desired state of ClusterResourceSet. properties: clusterSelector: - description: Label selector for Clusters. The Clusters that are selected - by this will be the ones affected by this ClusterResourceSet. It - must match the Cluster labels. This field is immutable. Label selector - cannot be empty. + description: |- + Label selector for Clusters. The Clusters that are + selected by this will be the ones affected by this ClusterResourceSet. + It must match the Cluster labels. This field is immutable. + Label selector cannot be empty. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic resources: description: Resources is a list of Secrets/ConfigMaps where each contains 1 or more resources to be applied to remote clusters. @@ -453,37 +484,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. 
+ The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - lastTransitionTime diff --git a/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml b/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml index 69da154b6f4b..18a4c549d0dd 100644 --- a/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml +++ b/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.15.0 name: clusterclasses.cluster.x-k8s.io spec: group: cluster.x-k8s.io @@ -24,22 +23,30 @@ spec: jsonPath: .metadata.creationTimestamp name: Age type: date + deprecated: true name: v1alpha4 schema: openAPIV3Schema: - description: "ClusterClass is a template which can be used to create managed - topologies. \n Deprecated: This type will be removed in one of the next - releases." + description: |- + ClusterClass is a template which can be used to create managed topologies. + + + Deprecated: This type will be removed in one of the next releases. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -47,51 +54,62 @@ spec: description: ClusterClassSpec describes the desired state of the ClusterClass. properties: controlPlane: - description: ControlPlane is a reference to a local struct that holds - the details for provisioning the Control Plane for the Cluster. 
+ description: |- + ControlPlane is a reference to a local struct that holds the details + for provisioning the Control Plane for the Cluster. properties: machineInfrastructure: - description: "MachineTemplate defines the metadata and infrastructure - information for control plane machines. \n This field is supported - if and only if the control plane provider template referenced - above is Machine based and supports setting replicas." + description: |- + MachineTemplate defines the metadata and infrastructure information + for control plane machines. + + + This field is supported if and only if the control plane provider template + referenced above is Machine based and supports setting replicas. properties: ref: - description: Ref is a required reference to a custom resource + description: |- + Ref is a required reference to a custom resource offered by a provider. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead - of an entire object, this string should contain a valid - JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container - within a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that - triggered the event) or if no container name is specified - "spec.containers[2]" (container with index 2 in this - pod). This syntax is chosen only to have some well-defined - way of referencing a part of an object. TODO: this design - is not final and this field is subject to change in - the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic @@ -99,65 +117,76 @@ spec: - ref type: object metadata: - description: "Metadata is the metadata applied to the machines - of the ControlPlane. At runtime this metadata is merged with - the corresponding metadata from the topology. \n This field - is supported if and only if the control plane provider template - referenced is Machine based." + description: |- + Metadata is the metadata applied to the machines of the ControlPlane. + At runtime this metadata is merged with the corresponding metadata from the topology. + + + This field is supported if and only if the control plane provider template + referenced is Machine based. properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key value map - stored with a resource that may be set by external tools - to store and retrieve arbitrary metadata. They are not queryable - and should be preserved when modifying objects. More info: - http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations type: object labels: additionalProperties: type: string - description: 'Map of string keys and values that can be used - to organize and categorize (scope and select) objects. May - match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object type: object ref: - description: Ref is a required reference to a custom resource + description: |- + Ref is a required reference to a custom resource offered by a provider. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead - of an entire object, this string should contain a valid - JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part - of an object. TODO: this design is not final and this field - is subject to change in the future.' 
+ description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic @@ -165,47 +194,56 @@ spec: - ref type: object infrastructure: - description: Infrastructure is a reference to a provider-specific - template that holds the details for provisioning infrastructure - specific cluster for the underlying provider. The underlying provider - is responsible for the implementation of the template to an infrastructure - cluster. + description: |- + Infrastructure is a reference to a provider-specific template that holds + the details for provisioning infrastructure specific cluster + for the underlying provider. + The underlying provider is responsible for the implementation + of the template to an infrastructure cluster. properties: ref: - description: Ref is a required reference to a custom resource + description: |- + Ref is a required reference to a custom resource offered by a provider. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead - of an entire object, this string should contain a valid - JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
- For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part - of an object. TODO: this design is not final and this field - is subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic @@ -213,77 +251,79 @@ spec: - ref type: object workers: - description: Workers describes the worker nodes for the cluster. It - is a collection of node types which can be used to create the worker - nodes of the cluster. + description: |- + Workers describes the worker nodes for the cluster. + It is a collection of node types which can be used to create + the worker nodes of the cluster. properties: machineDeployments: - description: MachineDeployments is a list of machine deployment - classes that can be used to create a set of worker nodes. + description: |- + MachineDeployments is a list of machine deployment classes that can be used to create + a set of worker nodes. 
items: - description: MachineDeploymentClass serves as a template to - define a set of worker nodes of the cluster provisioned using - the `ClusterClass`. + description: |- + MachineDeploymentClass serves as a template to define a set of worker nodes of the cluster + provisioned using the `ClusterClass`. properties: class: - description: Class denotes a type of worker node present - in the cluster, this name MUST be unique within a ClusterClass - and can be referenced in the Cluster to create a managed - MachineDeployment. + description: |- + Class denotes a type of worker node present in the cluster, + this name MUST be unique within a ClusterClass and can be referenced + in the Cluster to create a managed MachineDeployment. type: string template: - description: Template is a local struct containing a collection - of templates for creation of MachineDeployment objects - representing a set of worker nodes. + description: |- + Template is a local struct containing a collection of templates for creation of + MachineDeployment objects representing a set of worker nodes. properties: bootstrap: - description: Bootstrap contains the bootstrap template - reference to be used for the creation of worker Machines. + description: |- + Bootstrap contains the bootstrap template reference to be used + for the creation of worker Machines. properties: ref: - description: Ref is a required reference to a custom - resource offered by a provider. + description: |- + Ref is a required reference to a custom resource + offered by a provider. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an - object instead of an entire object, this string - should contain a valid JSON/Go field access - statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to - a container within a pod, this would take - on a value like: "spec.containers{name}" (where - "name" refers to the name of the container - that triggered the event) or if no container - name is specified "spec.containers[2]" (container - with index 2 in this pod). This syntax is - chosen only to have some well-defined way - of referencing a part of an object. TODO: - this design is not final and this field is - subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which - this reference is made, if any. More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic @@ -291,54 +331,53 @@ spec: - ref type: object infrastructure: - description: Infrastructure contains the infrastructure - template reference to be used for the creation of - worker Machines. + description: |- + Infrastructure contains the infrastructure template reference to be used + for the creation of worker Machines. properties: ref: - description: Ref is a required reference to a custom - resource offered by a provider. + description: |- + Ref is a required reference to a custom resource + offered by a provider. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an - object instead of an entire object, this string - should contain a valid JSON/Go field access - statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to - a container within a pod, this would take - on a value like: "spec.containers{name}" (where - "name" refers to the name of the container - that triggered the event) or if no container - name is specified "spec.containers[2]" (container - with index 2 in this pod). This syntax is - chosen only to have some well-defined way - of referencing a part of an object. TODO: - this design is not final and this field is - subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which - this reference is made, if any. More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic @@ -346,27 +385,27 @@ spec: - ref type: object metadata: - description: Metadata is the metadata applied to the - machines of the MachineDeployment. At runtime this - metadata is merged with the corresponding metadata - from the topology. + description: |- + Metadata is the metadata applied to the machines of the MachineDeployment. + At runtime this metadata is merged with the corresponding metadata from the topology. properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key - value map stored with a resource that may be set - by external tools to store and retrieve arbitrary - metadata. They are not queryable and should be - preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations type: object labels: additionalProperties: type: string - description: 'Map of string keys and values that - can be used to organize and categorize (scope - and select) objects. May match selectors of replication - controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object type: object required: @@ -381,7 +420,7 @@ spec: type: object type: object type: object - served: true + served: false storage: false subresources: {} - additionalPrinterColumns: @@ -396,14 +435,19 @@ spec: topologies. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -411,82 +455,91 @@ spec: description: ClusterClassSpec describes the desired state of the ClusterClass. properties: controlPlane: - description: ControlPlane is a reference to a local struct that holds - the details for provisioning the Control Plane for the Cluster. + description: |- + ControlPlane is a reference to a local struct that holds the details + for provisioning the Control Plane for the Cluster. properties: machineHealthCheck: - description: MachineHealthCheck defines a MachineHealthCheck for - this ControlPlaneClass. This field is supported if and only - if the ControlPlane provider template referenced above is Machine - based and supports setting replicas. + description: |- + MachineHealthCheck defines a MachineHealthCheck for this ControlPlaneClass. + This field is supported if and only if the ControlPlane provider template + referenced above is Machine based and supports setting replicas. properties: maxUnhealthy: anyOf: - type: integer - type: string - description: Any further remediation is only allowed if at - most "MaxUnhealthy" machines selected by "selector" are - not healthy. + description: |- + Any further remediation is only allowed if at most "MaxUnhealthy" machines selected by + "selector" are not healthy. x-kubernetes-int-or-string: true nodeStartupTimeout: - description: Machines older than this duration without a node - will be considered to have failed and will be remediated. - If you wish to disable this feature, set the value explicitly - to 0. + description: |- + Machines older than this duration without a node will be considered to have + failed and will be remediated. + If you wish to disable this feature, set the value explicitly to 0. type: string remediationTemplate: - description: "RemediationTemplate is a reference to a remediation - template provided by an infrastructure provider. \n This - field is completely optional, when filled, the MachineHealthCheck - controller creates a new object from the template referenced - and hands off remediation of the machine to a controller - that lives outside of Cluster API." + description: |- + RemediationTemplate is a reference to a remediation template + provided by an infrastructure provider. 
+ + + This field is completely optional, when filled, the MachineHealthCheck controller + creates a new object from the template referenced and hands off remediation of the machine to + a controller that lives outside of Cluster API. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead - of an entire object, this string should contain a valid - JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container - within a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that - triggered the event) or if no container name is specified - "spec.containers[2]" (container with index 2 in this - pod). This syntax is chosen only to have some well-defined - way of referencing a part of an object. TODO: this design - is not final and this field is subject to change in - the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic unhealthyConditions: - description: UnhealthyConditions contains a list of the conditions - that determine whether a node is considered unhealthy. 
The - conditions are combined in a logical OR, i.e. if any of - the conditions is met, the node is unhealthy. + description: |- + UnhealthyConditions contains a list of the conditions that determine + whether a node is considered unhealthy. The conditions are combined in a + logical OR, i.e. if any of the conditions is met, the node is unhealthy. items: - description: UnhealthyCondition represents a Node condition - type and value with a timeout specified as a duration. When - the named condition has been in the given status for at - least the timeout value, a node is considered unhealthy. + description: |- + UnhealthyCondition represents a Node condition type and value with a timeout + specified as a duration. When the named condition has been in the given + status for at least the timeout value, a node is considered unhealthy. properties: status: minLength: 1 @@ -503,57 +556,67 @@ spec: type: object type: array unhealthyRange: - description: 'Any further remediation is only allowed if the - number of machines selected by "selector" as not healthy - is within the range of "UnhealthyRange". Takes precedence - over MaxUnhealthy. Eg. "[3-5]" - This means that remediation - will be allowed only when: (a) there are at least 3 unhealthy - machines (and) (b) there are at most 5 unhealthy machines' + description: |- + Any further remediation is only allowed if the number of machines selected by "selector" as not healthy + is within the range of "UnhealthyRange". Takes precedence over MaxUnhealthy. + Eg. "[3-5]" - This means that remediation will be allowed only when: + (a) there are at least 3 unhealthy machines (and) + (b) there are at most 5 unhealthy machines pattern: ^\[[0-9]+-[0-9]+\]$ type: string type: object machineInfrastructure: - description: "MachineInfrastructure defines the metadata and infrastructure - information for control plane machines. \n This field is supported - if and only if the control plane provider template referenced - above is Machine based and supports setting replicas." + description: |- + MachineInfrastructure defines the metadata and infrastructure information + for control plane machines. + + + This field is supported if and only if the control plane provider template + referenced above is Machine based and supports setting replicas. properties: ref: - description: Ref is a required reference to a custom resource + description: |- + Ref is a required reference to a custom resource offered by a provider. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead - of an entire object, this string should contain a valid - JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container - within a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that - triggered the event) or if no container name is specified - "spec.containers[2]" (container with index 2 in this - pod). This syntax is chosen only to have some well-defined - way of referencing a part of an object. TODO: this design - is not final and this field is subject to change in - the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
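For context on the machineHealthCheck block above: in a ClusterClass manifest the fields fit together roughly as follows (a minimal sketch; the condition types, timeouts, and thresholds are illustrative assumptions, not taken from this diff):

    controlPlane:
      machineHealthCheck:
        # "40%" uses the int-or-string form; unhealthyRange (e.g. "[3-5]")
        # could be set instead and would take precedence over maxUnhealthy.
        maxUnhealthy: "40%"
        nodeStartupTimeout: 10m
        unhealthyConditions:
          - type: Ready
            status: Unknown
            timeout: 300s
          - type: Ready
            status: "False"   # quoted so YAML keeps it a string, not a bool
            timeout: 300s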
+ For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic @@ -561,89 +624,113 @@ spec: - ref type: object metadata: - description: "Metadata is the metadata applied to the ControlPlane - and the Machines of the ControlPlane if the ControlPlaneTemplate - referenced is machine based. If not, it is applied only to the - ControlPlane. At runtime this metadata is merged with the corresponding - metadata from the topology. \n This field is supported if and - only if the control plane provider template referenced is Machine - based." + description: |- + Metadata is the metadata applied to the ControlPlane and the Machines of the ControlPlane + if the ControlPlaneTemplate referenced is machine based. If not, it is applied only to the + ControlPlane. + At runtime this metadata is merged with the corresponding metadata from the topology. + + + This field is supported if and only if the control plane provider template + referenced is Machine based. properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key value map - stored with a resource that may be set by external tools - to store and retrieve arbitrary metadata. They are not queryable - and should be preserved when modifying objects. 
More info: - http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations type: object labels: additionalProperties: type: string - description: 'Map of string keys and values that can be used - to organize and categorize (scope and select) objects. May - match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object type: object + namingStrategy: + description: NamingStrategy allows changing the naming pattern + used when creating the control plane provider object. + properties: + template: + description: |- + Template defines the template to use for generating the name of the ControlPlane object. + If not defined, it will fall back to `{{ .cluster.name }}-{{ .random }}`. + If the templated string exceeds 63 characters, it will be trimmed to 58 characters and will + get concatenated with a random suffix of length 5. + The templating mechanism provides the following arguments: + * `.cluster.name`: The name of the cluster object. + * `.random`: A random alphanumeric string, without vowels, of length 5. + type: string + type: object nodeDeletionTimeout: - description: 'NodeDeletionTimeout defines how long the controller - will attempt to delete the Node that the Machine hosts after - the Machine is marked for deletion. A duration of 0 will retry - deletion indefinitely. Defaults to 10 seconds. NOTE: This value - can be overridden while defining a Cluster.Topology.' + description: |- + NodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine + hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. + Defaults to 10 seconds. + NOTE: This value can be overridden while defining a Cluster.Topology. type: string nodeDrainTimeout: - description: 'NodeDrainTimeout is the total amount of time that - the controller will spend on draining a node. The default value - is 0, meaning that the node can be drained without any time - limitations. NOTE: NodeDrainTimeout is different from `kubectl - drain --timeout` NOTE: This value can be overridden while defining - a Cluster.Topology.' + description: |- + NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + The default value is 0, meaning that the node can be drained without any time limitations. + NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` + NOTE: This value can be overridden while defining a Cluster.Topology. type: string nodeVolumeDetachTimeout: - description: 'NodeVolumeDetachTimeout is the total amount of time - that the controller will spend on waiting for all volumes to - be detached.
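A sketch of the new controlPlane.namingStrategy field in use, assuming only the two documented template arguments (`.cluster.name` and `.random`); the "-cp-" infix is an invented example:

    controlPlane:
      namingStrategy:
        # For a Cluster named "prod" this could render to e.g. "prod-cp-bcdfg";
        # when omitted, the documented default `{{ .cluster.name }}-{{ .random }}` applies.
        template: "{{ .cluster.name }}-cp-{{ .random }}"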
The default value is 0, meaning that the volumes can be detached without any time limitations. + NOTE: This value can be overridden while defining a Cluster.Topology. type: string ref: - description: Ref is a required reference to a custom resource + description: |- + Ref is a required reference to a custom resource offered by a provider. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead - of an entire object, this string should contain a valid - JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part - of an object. TODO: this design is not final and this field - is subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic @@ -651,47 +738,56 @@ spec: - ref type: object infrastructure: - description: Infrastructure is a reference to a provider-specific - template that holds the details for provisioning infrastructure - specific cluster for the underlying provider. The underlying provider - is responsible for the implementation of the template to an infrastructure - cluster. + description: |- + Infrastructure is a reference to a provider-specific template that holds + the details for provisioning infrastructure specific cluster + for the underlying provider. + The underlying provider is responsible for the implementation + of the template to an infrastructure cluster. properties: ref: - description: Ref is a required reference to a custom resource + description: |- + Ref is a required reference to a custom resource offered by a provider. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead - of an entire object, this string should contain a valid - JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part - of an object. TODO: this design is not final and this field - is subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic @@ -699,71 +795,70 @@ spec: - ref type: object patches: - description: 'Patches defines the patches which are applied to customize - referenced templates of a ClusterClass. Note: Patches will be applied - in the order of the array.' + description: |- + Patches defines the patches which are applied to customize + referenced templates of a ClusterClass. + Note: Patches will be applied in the order of the array. items: description: ClusterClassPatch defines a patch which is applied to customize the referenced templates. properties: definitions: - description: 'Definitions define inline patches. Note: Patches - will be applied in the order of the array. Note: Exactly one - of Definitions or External must be set.' + description: |- + Definitions define inline patches. + Note: Patches will be applied in the order of the array. + Note: Exactly one of Definitions or External must be set. items: description: PatchDefinition defines a patch which is applied to customize the referenced templates. properties: jsonPatches: - description: 'JSONPatches defines the patches which should - be applied on the templates matching the selector. Note: - Patches will be applied in the order of the array.' + description: |- + JSONPatches defines the patches which should be applied on the templates + matching the selector. + Note: Patches will be applied in the order of the array. items: description: JSONPatch defines a JSON patch. properties: op: - description: 'Op defines the operation of the patch. - Note: Only `add`, `replace` and `remove` are supported.' + description: |- + Op defines the operation of the patch. + Note: Only `add`, `replace` and `remove` are supported. type: string path: - description: 'Path defines the path of the patch. - Note: Only the spec of a template can be patched, - thus the path has to start with /spec/. Note: - For now the only allowed array modifications are - `append` and `prepend`, i.e.: * for op: `add`: - only index 0 (prepend) and - (append) are allowed - * for op: `replace` or `remove`: no indexes are - allowed' + description: |- + Path defines the path of the patch. + Note: Only the spec of a template can be patched, thus the path has to start with /spec/. + Note: For now the only allowed array modifications are `append` and `prepend`, i.e.: + * for op: `add`: only index 0 (prepend) and - (append) are allowed + * for op: `replace` or `remove`: no indexes are allowed type: string value: - description: 'Value defines the value of the patch. - Note: Either Value or ValueFrom is required for - add and replace operations. Only one of them is - allowed to be set at the same time. Note: We have - to use apiextensionsv1.JSON instead of our JSON - type, because controller-tools has a hard-coded - schema for apiextensionsv1.JSON which cannot be - produced by another type (unset type field). 
Ref: - https://github.com/kubernetes-sigs/controller-tools/blob/d0e03a142d0ecdd5491593e941ee1d6b5d91dba6/pkg/crd/known_types.go#L106-L111' + description: |- + Value defines the value of the patch. + Note: Either Value or ValueFrom is required for add and replace + operations. Only one of them is allowed to be set at the same time. + Note: We have to use apiextensionsv1.JSON instead of our JSON type, + because controller-tools has a hard-coded schema for apiextensionsv1.JSON + which cannot be produced by another type (unset type field). + Ref: https://github.com/kubernetes-sigs/controller-tools/blob/d0e03a142d0ecdd5491593e941ee1d6b5d91dba6/pkg/crd/known_types.go#L106-L111 x-kubernetes-preserve-unknown-fields: true valueFrom: - description: 'ValueFrom defines the value of the - patch. Note: Either Value or ValueFrom is required - for add and replace operations. Only one of them - is allowed to be set at the same time.' + description: |- + ValueFrom defines the value of the patch. + Note: Either Value or ValueFrom is required for add and replace + operations. Only one of them is allowed to be set at the same time. properties: template: - description: 'Template is the Go template to - be used to calculate the value. A template - can reference variables defined in .spec.variables - and builtin variables. Note: The template - must evaluate to a valid YAML or JSON value.' + description: |- + Template is the Go template to be used to calculate the value. + A template can reference variables defined in .spec.variables and builtin variables. + Note: The template must evaluate to a valid YAML or JSON value. type: string variable: - description: Variable is the variable to be - used as value. Variable can be one of the - variables defined in .spec.variables or a - builtin variable. + description: |- + Variable is the variable to be used as value. + Variable can be one of the variables defined in .spec.variables or a builtin variable. type: string type: object required: @@ -786,19 +881,31 @@ spec: on where they are referenced. properties: controlPlane: - description: 'ControlPlane selects templates referenced - in .spec.ControlPlane. Note: this will match - the controlPlane and also the controlPlane machineInfrastructure - (depending on the kind and apiVersion).' + description: |- + ControlPlane selects templates referenced in .spec.ControlPlane. + Note: this will match the controlPlane and also the controlPlane + machineInfrastructure (depending on the kind and apiVersion). type: boolean infrastructureCluster: description: InfrastructureCluster selects templates referenced in .spec.infrastructure. type: boolean machineDeploymentClass: - description: MachineDeploymentClass selects templates - referenced in specific MachineDeploymentClasses - in .spec.workers.machineDeployments. + description: |- + MachineDeploymentClass selects templates referenced in specific MachineDeploymentClasses in + .spec.workers.machineDeployments. + properties: + names: + description: Names selects templates by class + names. + items: + type: string + type: array + type: object + machinePoolClass: + description: |- + MachinePoolClass selects templates referenced in specific MachinePoolClasses in + .spec.workers.machinePools. properties: names: description: Names selects templates by class @@ -823,16 +930,17 @@ spec: this patch. type: string enabledIf: - description: EnabledIf is a Go template to be used to calculate - if a patch should be enabled. It can reference variables defined - in .spec.variables and builtin variables. 
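To make the patch machinery concrete, an inline patch combining the jsonPatches and selector fields above might look like this (a hypothetical sketch; the template kind, variable name, and patched path are illustrative, not part of this diff):

    patches:
      - name: imageRepository
        # Only applied when the variable is set (template must evaluate to `true`).
        enabledIf: "{{ if .imageRepository }}true{{ end }}"
        definitions:
          - selector:
              apiVersion: controlplane.cluster.x-k8s.io/v1beta1
              kind: KubeadmControlPlaneTemplate
              matchResources:
                controlPlane: true
            jsonPatches:
              - op: replace
                # Paths must start with /spec/, per the schema above.
                path: /spec/template/spec/kubeadmConfigSpec/clusterConfiguration/imageRepository
                valueFrom:
                  variable: imageRepository

The new machinePoolClass selector works the same way as machineDeploymentClass: matchResources.machinePoolClass.names would list the worker classes whose templates the patch applies to.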
The patch will be - enabled if the template evaluates to `true`, otherwise it - will be disabled. If EnabledIf is not set, the patch will - be enabled per default. + description: |- + EnabledIf is a Go template to be used to calculate if a patch should be enabled. + It can reference variables defined in .spec.variables and builtin variables. + The patch will be enabled if the template evaluates to `true`, otherwise it will + be disabled. + If EnabledIf is not set, the patch will be enabled per default. type: string external: - description: 'External defines an external patch. Note: Exactly - one of Definitions or External must be set.' + description: |- + External defines an external patch. + Note: Exactly one of Definitions or External must be set. properties: discoverVariablesExtension: description: DiscoverVariablesExtension references an extension @@ -845,9 +953,10 @@ spec: settings: additionalProperties: type: string - description: Settings defines key value pairs to be passed - to the extensions. Values defined here take precedence - over the values defined in the corresponding ExtensionConfig. + description: |- + Settings defines key value pairs to be passed to the extensions. + Values defined here take precedence over the values defined in the + corresponding ExtensionConfig. type: object validateExtension: description: ValidateExtension references an extension which @@ -862,48 +971,76 @@ spec: type: object type: array variables: - description: Variables defines the variables which can be configured + description: |- + Variables defines the variables which can be configured in the Cluster topology and are then used in patches. items: - description: ClusterClassVariable defines a variable which can be - configured in the Cluster topology and used in patches. + description: |- + ClusterClassVariable defines a variable which can + be configured in the Cluster topology and used in patches. properties: + metadata: + description: |- + Metadata is the metadata of a variable. + It can be used to add additional data for higher level tools to + a ClusterClassVariable. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map that can be used to store and + retrieve arbitrary metadata. + They are not queryable. + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) variables. + type: object + type: object name: description: Name of the variable. type: string required: - description: 'Required specifies if the variable is required. + description: |- + Required specifies if the variable is required. Note: this applies to the variable as a whole and thus the top-level object defined in the schema. If nested fields are - required, this will be specified inside the schema.' + required, this will be specified inside the schema. type: boolean schema: description: Schema defines the schema of the variable. properties: openAPIV3Schema: - description: OpenAPIV3Schema defines the schema of a variable - via OpenAPI v3 schema. The schema is a subset of the schema - used in Kubernetes CRDs. + description: |- + OpenAPIV3Schema defines the schema of a variable via OpenAPI v3 + schema. The schema is a subset of the schema used in + Kubernetes CRDs. properties: additionalProperties: - description: 'AdditionalProperties specifies the schema - of values in a map (keys are always strings). 
NOTE: - Can only be set if type is object. NOTE: AdditionalProperties - is mutually exclusive with Properties. NOTE: This - field uses PreserveUnknownFields and Schemaless, because - recursive validation is not possible.' + description: |- + AdditionalProperties specifies the schema of values in a map (keys are always strings). + NOTE: Can only be set if type is object. + NOTE: AdditionalProperties is mutually exclusive with Properties. + NOTE: This field uses PreserveUnknownFields and Schemaless, + because recursive validation is not possible. x-kubernetes-preserve-unknown-fields: true default: - description: 'Default is the default value of the variable. - NOTE: Can be set for all types.' + description: |- + Default is the default value of the variable. + NOTE: Can be set for all types. x-kubernetes-preserve-unknown-fields: true description: description: Description is a human-readable description of this variable. type: string enum: - description: 'Enum is the list of valid values of the - variable. NOTE: Can be set for all types.' + description: |- + Enum is the list of valid values of the variable. + NOTE: Can be set for all types. items: x-kubernetes-preserve-unknown-fields: true type: array @@ -911,101 +1048,104 @@ spec: description: Example is an example for this variable. x-kubernetes-preserve-unknown-fields: true exclusiveMaximum: - description: 'ExclusiveMaximum specifies if the Maximum - is exclusive. NOTE: Can only be set if type is integer - or number.' + description: |- + ExclusiveMaximum specifies if the Maximum is exclusive. + NOTE: Can only be set if type is integer or number. type: boolean exclusiveMinimum: - description: 'ExclusiveMinimum specifies if the Minimum - is exclusive. NOTE: Can only be set if type is integer - or number.' + description: |- + ExclusiveMinimum specifies if the Minimum is exclusive. + NOTE: Can only be set if type is integer or number. type: boolean format: - description: 'Format is an OpenAPI v3 format string. - Unknown formats are ignored. For a list of supported - formats please see: (of the k8s.io/apiextensions-apiserver - version we''re currently using) https://github.com/kubernetes/apiextensions-apiserver/blob/master/pkg/apiserver/validation/formats.go - NOTE: Can only be set if type is string.' + description: |- + Format is an OpenAPI v3 format string. Unknown formats are ignored. + For a list of supported formats please see: (of the k8s.io/apiextensions-apiserver version we're currently using) + https://github.com/kubernetes/apiextensions-apiserver/blob/master/pkg/apiserver/validation/formats.go + NOTE: Can only be set if type is string. type: string items: - description: 'Items specifies fields of an array. NOTE: - Can only be set if type is array. NOTE: This field - uses PreserveUnknownFields and Schemaless, because - recursive validation is not possible.' + description: |- + Items specifies fields of an array. + NOTE: Can only be set if type is array. + NOTE: This field uses PreserveUnknownFields and Schemaless, + because recursive validation is not possible. x-kubernetes-preserve-unknown-fields: true maxItems: - description: 'MaxItems is the max length of an array - variable. NOTE: Can only be set if type is array.' + description: |- + MaxItems is the max length of an array variable. + NOTE: Can only be set if type is array. format: int64 type: integer maxLength: - description: 'MaxLength is the max length of a string - variable. NOTE: Can only be set if type is string.' 
+ description: |- + MaxLength is the max length of a string variable. + NOTE: Can only be set if type is string. format: int64 type: integer maximum: - description: 'Maximum is the maximum of an integer or - number variable. If ExclusiveMaximum is false, the - variable is valid if it is lower than, or equal to, - the value of Maximum. If ExclusiveMaximum is true, - the variable is valid if it is strictly lower than - the value of Maximum. NOTE: Can only be set if type - is integer or number.' + description: |- + Maximum is the maximum of an integer or number variable. + If ExclusiveMaximum is false, the variable is valid if it is lower than, or equal to, the value of Maximum. + If ExclusiveMaximum is true, the variable is valid if it is strictly lower than the value of Maximum. + NOTE: Can only be set if type is integer or number. format: int64 type: integer minItems: - description: 'MinItems is the min length of an array - variable. NOTE: Can only be set if type is array.' + description: |- + MinItems is the min length of an array variable. + NOTE: Can only be set if type is array. format: int64 type: integer minLength: - description: 'MinLength is the min length of a string - variable. NOTE: Can only be set if type is string.' + description: |- + MinLength is the min length of a string variable. + NOTE: Can only be set if type is string. format: int64 type: integer minimum: - description: 'Minimum is the minimum of an integer or - number variable. If ExclusiveMinimum is false, the - variable is valid if it is greater than, or equal - to, the value of Minimum. If ExclusiveMinimum is true, - the variable is valid if it is strictly greater than - the value of Minimum. NOTE: Can only be set if type - is integer or number.' + description: |- + Minimum is the minimum of an integer or number variable. + If ExclusiveMinimum is false, the variable is valid if it is greater than, or equal to, the value of Minimum. + If ExclusiveMinimum is true, the variable is valid if it is strictly greater than the value of Minimum. + NOTE: Can only be set if type is integer or number. format: int64 type: integer pattern: - description: 'Pattern is the regex which a string variable - must match. NOTE: Can only be set if type is string.' + description: |- + Pattern is the regex which a string variable must match. + NOTE: Can only be set if type is string. type: string properties: - description: 'Properties specifies fields of an object. - NOTE: Can only be set if type is object. NOTE: Properties - is mutually exclusive with AdditionalProperties. NOTE: - This field uses PreserveUnknownFields and Schemaless, - because recursive validation is not possible.' + description: |- + Properties specifies fields of an object. + NOTE: Can only be set if type is object. + NOTE: Properties is mutually exclusive with AdditionalProperties. + NOTE: This field uses PreserveUnknownFields and Schemaless, + because recursive validation is not possible. x-kubernetes-preserve-unknown-fields: true required: - description: 'Required specifies which fields of an - object are required. NOTE: Can only be set if type - is object.' + description: |- + Required specifies which fields of an object are required. + NOTE: Can only be set if type is object. items: type: string type: array type: - description: 'Type is the type of the variable. Valid - values are: object, array, string, integer, number - or boolean.' + description: |- + Type is the type of the variable. + Valid values are: object, array, string, integer, number or boolean. 
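A variable definition exercising these schema fields, including the new metadata block, could look like the following (the name, label, and bounds are invented for illustration):

    variables:
      - name: workerReplicas
        required: true
        metadata:
          labels:
            scope: workers   # free-form metadata for higher-level tooling
        schema:
          openAPIV3Schema:
            type: integer
            minimum: 1
            maximum: 10
            default: 3
            example: 3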
type: string uniqueItems: - description: 'UniqueItems specifies if items in an array - must be unique. NOTE: Can only be set if type is array.' + description: |- + UniqueItems specifies if items in an array must be unique. + NOTE: Can only be set if type is array. type: boolean x-kubernetes-preserve-unknown-fields: - description: XPreserveUnknownFields allows setting fields - in a variable object which are not defined in the - variable schema. This affects fields recursively, - except if nested properties or additionalProperties - are specified in the schema. + description: |- + XPreserveUnknownFields allows setting fields in a variable object + which are not defined in the variable schema. This affects fields recursively, + except if nested properties or additionalProperties are specified in the schema. type: boolean required: - type @@ -1020,30 +1160,31 @@ spec: type: object type: array workers: - description: Workers describes the worker nodes for the cluster. It - is a collection of node types which can be used to create the worker - nodes of the cluster. + description: |- + Workers describes the worker nodes for the cluster. + It is a collection of node types which can be used to create + the worker nodes of the cluster. properties: machineDeployments: - description: MachineDeployments is a list of machine deployment - classes that can be used to create a set of worker nodes. + description: |- + MachineDeployments is a list of machine deployment classes that can be used to create + a set of worker nodes. items: - description: MachineDeploymentClass serves as a template to - define a set of worker nodes of the cluster provisioned using - the `ClusterClass`. + description: |- + MachineDeploymentClass serves as a template to define a set of worker nodes of the cluster + provisioned using the `ClusterClass`. properties: class: - description: Class denotes a type of worker node present - in the cluster, this name MUST be unique within a ClusterClass - and can be referenced in the Cluster to create a managed - MachineDeployment. + description: |- + Class denotes a type of worker node present in the cluster, + this name MUST be unique within a ClusterClass and can be referenced + in the Cluster to create a managed MachineDeployment. type: string failureDomain: - description: 'FailureDomain is the failure domain the machines - will be created in. Must match a key in the FailureDomains - map stored on the cluster object. NOTE: This value can - be overridden while defining a Cluster.Topology using - this MachineDeploymentClass.' + description: |- + FailureDomain is the failure domain the machines will be created in. + Must match a key in the FailureDomains map stored on the cluster object. + NOTE: This value can be overridden while defining a Cluster.Topology using this MachineDeploymentClass. type: string machineHealthCheck: description: MachineHealthCheck defines a MachineHealthCheck @@ -1053,75 +1194,77 @@ spec: anyOf: - type: integer - type: string - description: Any further remediation is only allowed - if at most "MaxUnhealthy" machines selected by "selector" - are not healthy. + description: |- + Any further remediation is only allowed if at most "MaxUnhealthy" machines selected by + "selector" are not healthy. x-kubernetes-int-or-string: true nodeStartupTimeout: - description: Machines older than this duration without - a node will be considered to have failed and will - be remediated. If you wish to disable this feature, - set the value explicitly to 0. 
+ description: |- + Machines older than this duration without a node will be considered to have + failed and will be remediated. + If you wish to disable this feature, set the value explicitly to 0. type: string remediationTemplate: - description: "RemediationTemplate is a reference to - a remediation template provided by an infrastructure - provider. \n This field is completely optional, when - filled, the MachineHealthCheck controller creates - a new object from the template referenced and hands - off remediation of the machine to a controller that - lives outside of Cluster API." + description: |- + RemediationTemplate is a reference to a remediation template + provided by an infrastructure provider. + + + This field is completely optional, when filled, the MachineHealthCheck controller + creates a new object from the template referenced and hands off remediation of the machine to + a controller that lives outside of Cluster API. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object - instead of an entire object, this string should - contain a valid JSON/Go field access statement, - such as desiredState.manifest.containers[2]. For - example, if the object reference is to a container - within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to - the name of the container that triggered the event) - or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax - is chosen only to have some well-defined way of - referencing a part of an object. TODO: this design - is not final and this field is subject to change - in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which - this reference is made, if any. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic unhealthyConditions: - description: UnhealthyConditions contains a list of - the conditions that determine whether a node is considered - unhealthy. The conditions are combined in a logical - OR, i.e. if any of the conditions is met, the node - is unhealthy. + description: |- + UnhealthyConditions contains a list of the conditions that determine + whether a node is considered unhealthy. The conditions are combined in a + logical OR, i.e. if any of the conditions is met, the node is unhealthy. items: - description: UnhealthyCondition represents a Node - condition type and value with a timeout specified - as a duration. When the named condition has been - in the given status for at least the timeout value, - a node is considered unhealthy. + description: |- + UnhealthyCondition represents a Node condition type and value with a timeout + specified as a duration. When the named condition has been in the given + status for at least the timeout value, a node is considered unhealthy. properties: status: minLength: 1 @@ -1138,65 +1281,76 @@ spec: type: object type: array unhealthyRange: - description: 'Any further remediation is only allowed - if the number of machines selected by "selector" as - not healthy is within the range of "UnhealthyRange". - Takes precedence over MaxUnhealthy. Eg. "[3-5]" - - This means that remediation will be allowed only when: + description: |- + Any further remediation is only allowed if the number of machines selected by "selector" as not healthy + is within the range of "UnhealthyRange". Takes precedence over MaxUnhealthy. + Eg. "[3-5]" - This means that remediation will be allowed only when: (a) there are at least 3 unhealthy machines (and) - (b) there are at most 5 unhealthy machines' + (b) there are at most 5 unhealthy machines pattern: ^\[[0-9]+-[0-9]+\]$ type: string type: object minReadySeconds: - description: 'Minimum number of seconds for which a newly - created machine should be ready. Defaults to 0 (machine - will be considered available as soon as it is ready) NOTE: - This value can be overridden while defining a Cluster.Topology - using this MachineDeploymentClass.' + description: |- + Minimum number of seconds for which a newly created machine should + be ready. + Defaults to 0 (machine will be considered available as soon as it + is ready) + NOTE: This value can be overridden while defining a Cluster.Topology using this MachineDeploymentClass. format: int32 type: integer + namingStrategy: + description: NamingStrategy allows changing the naming pattern + used when creating the MachineDeployment. + properties: + template: + description: |- + Template defines the template to use for generating the name of the MachineDeployment object. + If not defined, it will fallback to `{{ .cluster.name }}-{{ .machineDeployment.topologyName }}-{{ .random }}`. 
+ If the templated string exceeds 63 characters, it will be trimmed to 58 characters and will + get concatenated with a random suffix of length 5. + The templating mechanism provides the following arguments: + * `.cluster.name`: The name of the cluster object. + * `.random`: A random alphanumeric string, without vowels, of length 5. + * `.machineDeployment.topologyName`: The name of the MachineDeployment topology (Cluster.spec.topology.workers.machineDeployments[].name). + type: string + type: object nodeDeletionTimeout: - description: 'NodeDeletionTimeout defines how long the controller - will attempt to delete the Node that the Machine hosts - after the Machine is marked for deletion. A duration of - 0 will retry deletion indefinitely. Defaults to 10 seconds. - NOTE: This value can be overridden while defining a Cluster.Topology - using this MachineDeploymentClass.' + description: |- + NodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine + hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. + Defaults to 10 seconds. + NOTE: This value can be overridden while defining a Cluster.Topology using this MachineDeploymentClass. type: string nodeDrainTimeout: - description: 'NodeDrainTimeout is the total amount of time - that the controller will spend on draining a node. The - default value is 0, meaning that the node can be drained - without any time limitations. NOTE: NodeDrainTimeout is - different from `kubectl drain --timeout` NOTE: This value - can be overridden while defining a Cluster.Topology using - this MachineDeploymentClass.' + description: |- + NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + The default value is 0, meaning that the node can be drained without any time limitations. + NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` + NOTE: This value can be overridden while defining a Cluster.Topology using this MachineDeploymentClass. type: string nodeVolumeDetachTimeout: - description: 'NodeVolumeDetachTimeout is the total amount - of time that the controller will spend on waiting for - all volumes to be detached. The default value is 0, meaning - that the volumes can be detached without any time limitations. - NOTE: This value can be overridden while defining a Cluster.Topology - using this MachineDeploymentClass.' + description: |- + NodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. + NOTE: This value can be overridden while defining a Cluster.Topology using this MachineDeploymentClass. type: string strategy: - description: 'The deployment strategy to use to replace - existing machines with new ones. NOTE: This value can - be overridden while defining a Cluster.Topology using - this MachineDeploymentClass.' + description: |- + The deployment strategy to use to replace existing machines with + new ones. + NOTE: This value can be overridden while defining a Cluster.Topology using this MachineDeploymentClass. properties: rollingUpdate: - description: Rolling update config params. Present only - if MachineDeploymentStrategyType = RollingUpdate. + description: |- + Rolling update config params. Present only if + MachineDeploymentStrategyType = RollingUpdate. 
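Analogous to the control plane, the MachineDeployment namingStrategy could be used like this (class and template ref names are hypothetical; the template string uses exactly the three documented arguments):

    workers:
      machineDeployments:
        - class: default-worker
          namingStrategy:
            # For Cluster "prod" and topology "md-0" this could render to e.g. "prod-md-0-bcdfg".
            template: "{{ .cluster.name }}-{{ .machineDeployment.topologyName }}-{{ .random }}"
          template:
            bootstrap:
              ref:
                apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
                kind: KubeadmConfigTemplate
                name: default-worker-bootstrap   # hypothetical
            infrastructure:
              ref:
                apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
                kind: DockerMachineTemplate
                name: default-worker-infra       # hypothetical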
properties: deletePolicy: - description: DeletePolicy defines the policy used - by the MachineDeployment to identify nodes to - delete when downscaling. Valid values are "Random, - "Newest", "Oldest" When no value is supplied, - the default DeletePolicy of MachineSet is used + description: |- + DeletePolicy defines the policy used by the MachineDeployment to identify nodes to delete when downscaling. + Valid values are "Random", "Newest", "Oldest" + When no value is supplied, the default DeletePolicy of MachineSet is used enum: - Random - Newest @@ -1206,101 +1360,316 @@ spec: anyOf: - type: integer - type: string - description: 'The maximum number of machines that - can be scheduled above the desired number of machines. - Value can be an absolute number (ex: 5) or a percentage - of desired machines (ex: 10%). This can not be - 0 if MaxUnavailable is 0. Absolute number is calculated - from percentage by rounding up. Defaults to 1. - Example: when this is set to 30%, the new MachineSet - can be scaled up immediately when the rolling - update starts, such that the total number of old - and new machines do not exceed 130% of desired - machines. Once old machines have been killed, - new MachineSet can be scaled up further, ensuring - that total number of machines running at any time - during the update is at most 130% of desired machines.' + description: |- + The maximum number of machines that can be scheduled above the + desired number of machines. + Value can be an absolute number (ex: 5) or a percentage of + desired machines (ex: 10%). + This cannot be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up. + Defaults to 1. + Example: when this is set to 30%, the new MachineSet can be scaled + up immediately when the rolling update starts, such that the total + number of old and new machines do not exceed 130% of desired + machines. Once old machines have been killed, new MachineSet can + be scaled up further, ensuring that total number of machines running + at any time during the update is at most 130% of desired machines. x-kubernetes-int-or-string: true maxUnavailable: anyOf: - type: integer - type: string - description: 'The maximum number of machines that - can be unavailable during the update. Value can - be an absolute number (ex: 5) or a percentage - of desired machines (ex: 10%). Absolute number - is calculated from percentage by rounding down. - This can not be 0 if MaxSurge is 0. Defaults to - 0. Example: when this is set to 30%, the old MachineSet - can be scaled down to 70% of desired machines - immediately when the rolling update starts. Once - new machines are ready, old MachineSet can be - scaled down further, followed by scaling up the - new MachineSet, ensuring that the total number - of machines available at all times during the - update is at least 70% of desired machines.' + description: |- + The maximum number of machines that can be unavailable during the update. + Value can be an absolute number (ex: 5) or a percentage of desired + machines (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This cannot be 0 if MaxSurge is 0. + Defaults to 0. + Example: when this is set to 30%, the old MachineSet can be scaled + down to 70% of desired machines immediately when the rolling update + starts.
Once new machines are ready, old MachineSet can be scaled + down further, followed by scaling up the new MachineSet, ensuring + that the total number of machines available at all times + during the update is at least 70% of desired machines. x-kubernetes-int-or-string: true type: object type: - description: Type of deployment. Default is RollingUpdate. + description: |- + Type of deployment. Allowed values are RollingUpdate and OnDelete. + The default is RollingUpdate. enum: - RollingUpdate - OnDelete type: string type: object template: - description: Template is a local struct containing a collection - of templates for creation of MachineDeployment objects - representing a set of worker nodes. + description: |- + Template is a local struct containing a collection of templates for creation of + MachineDeployment objects representing a set of worker nodes. + properties: + bootstrap: + description: |- + Bootstrap contains the bootstrap template reference to be used + for the creation of worker Machines. + properties: + ref: + description: |- + Ref is a required reference to a custom resource + offered by a provider. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + required: + - ref + type: object + infrastructure: + description: |- + Infrastructure contains the infrastructure template reference to be used + for the creation of worker Machines. + properties: + ref: + description: |- + Ref is a required reference to a custom resource + offered by a provider. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
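# For illustration only: a hedged sketch of the rollout strategy fields described
# above, with illustrative values, placed inside a MachineDeploymentClass (or a
# MachineDeployment spec).
strategy:
  type: RollingUpdate        # allowed values per the schema: RollingUpdate, OnDelete
  rollingUpdate:
    maxSurge: 1              # int-or-string; a percentage such as "10%" is also accepted
    maxUnavailable: 0        # cannot be 0 when maxSurge is also 0
    deletePolicy: Oldest     # Random | Newest | Oldest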
+ For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + required: + - ref + type: object + metadata: + description: |- + Metadata is the metadata applied to the MachineDeployment and the machines of the MachineDeployment. + At runtime this metadata is merged with the corresponding metadata from the topology. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + type: object + required: + - bootstrap + - infrastructure + type: object + required: + - class + - template + type: object + type: array + machinePools: + description: |- + MachinePools is a list of machine pool classes that can be used to create + a set of worker nodes. + items: + description: |- + MachinePoolClass serves as a template to define a pool of worker nodes of the cluster + provisioned using `ClusterClass`. + properties: + class: + description: |- + Class denotes a type of machine pool present in the cluster, + this name MUST be unique within a ClusterClass and can be referenced + in the Cluster to create a managed MachinePool. + type: string + failureDomains: + description: |- + FailureDomains is the list of failure domains the MachinePool should be attached to. + Must match a key in the FailureDomains map stored on the cluster object. + NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass. + items: + type: string + type: array + minReadySeconds: + description: |- + Minimum number of seconds for which a newly created machine pool should + be ready. 
+ Defaults to 0 (machine will be considered available as soon as it + is ready) + NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass. + format: int32 + type: integer + namingStrategy: + description: NamingStrategy allows changing the naming pattern + used when creating the MachinePool. + properties: + template: + description: |- + Template defines the template to use for generating the name of the MachinePool object. + If not defined, it will fallback to `{{ .cluster.name }}-{{ .machinePool.topologyName }}-{{ .random }}`. + If the templated string exceeds 63 characters, it will be trimmed to 58 characters and will + get concatenated with a random suffix of length 5. + The templating mechanism provides the following arguments: + * `.cluster.name`: The name of the cluster object. + * `.random`: A random alphanumeric string, without vowels, of length 5. + * `.machinePool.topologyName`: The name of the MachinePool topology (Cluster.spec.topology.workers.machinePools[].name). + type: string + type: object + nodeDeletionTimeout: + description: |- + NodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine + hosts after the Machine Pool is marked for deletion. A duration of 0 will retry deletion indefinitely. + Defaults to 10 seconds. + NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass. + type: string + nodeDrainTimeout: + description: |- + NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + The default value is 0, meaning that the node can be drained without any time limitations. + NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` + NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass. + type: string + nodeVolumeDetachTimeout: + description: |- + NodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. + NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass. + type: string + template: + description: |- + Template is a local struct containing a collection of templates for creation of + MachinePools objects representing a pool of worker nodes. properties: bootstrap: - description: Bootstrap contains the bootstrap template - reference to be used for the creation of worker Machines. + description: |- + Bootstrap contains the bootstrap template reference to be used + for the creation of the Machines in the MachinePool. properties: ref: - description: Ref is a required reference to a custom - resource offered by a provider. + description: |- + Ref is a required reference to a custom resource + offered by a provider. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an - object instead of an entire object, this string - should contain a valid JSON/Go field access - statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to - a container within a pod, this would take - on a value like: "spec.containers{name}" (where - "name" refers to the name of the container - that triggered the event) or if no container - name is specified "spec.containers[2]" (container - with index 2 in this pod). 
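# For illustration only: a hedged sketch of a MachinePoolClass entry under
# ClusterClass spec.workers, using the fields described above. The class name and
# failure domain are hypothetical, and the required template.bootstrap /
# template.infrastructure refs are elided for brevity.
machinePools:
- class: default-pool
  failureDomains:
  - fd1                      # must match a key in the cluster's failureDomains map
  minReadySeconds: 10
  # Falls back to "{{ .cluster.name }}-{{ .machinePool.topologyName }}-{{ .random }}" when unset.
  namingStrategy:
    template: "{{ .cluster.name }}-{{ .machinePool.topologyName }}-{{ .random }}"
  nodeDrainTimeout: 5m
  nodeVolumeDetachTimeout: 5m
  nodeDeletionTimeout: 10s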
This syntax is - chosen only to have some well-defined way - of referencing a part of an object. TODO: - this design is not final and this field is - subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which - this reference is made, if any. More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic @@ -1308,54 +1677,53 @@ spec: - ref type: object infrastructure: - description: Infrastructure contains the infrastructure - template reference to be used for the creation of - worker Machines. + description: |- + Infrastructure contains the infrastructure template reference to be used + for the creation of the MachinePool. properties: ref: - description: Ref is a required reference to a custom - resource offered by a provider. + description: |- + Ref is a required reference to a custom resource + offered by a provider. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an - object instead of an entire object, this string - should contain a valid JSON/Go field access - statement, such as desiredState.manifest.containers[2]. 
- For example, if the object reference is to - a container within a pod, this would take - on a value like: "spec.containers{name}" (where - "name" refers to the name of the container - that triggered the event) or if no container - name is specified "spec.containers[2]" (container - with index 2 in this pod). This syntax is - chosen only to have some well-defined way - of referencing a part of an object. TODO: - this design is not final and this field is - subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which - this reference is made, if any. More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic @@ -1363,27 +1731,27 @@ spec: - ref type: object metadata: - description: Metadata is the metadata applied to the - MachineDeployment and the machines of the MachineDeployment. - At runtime this metadata is merged with the corresponding - metadata from the topology. + description: |- + Metadata is the metadata applied to the MachinePool. + At runtime this metadata is merged with the corresponding metadata from the topology. properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key - value map stored with a resource that may be set - by external tools to store and retrieve arbitrary - metadata. 
They are not queryable and should be - preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations type: object labels: additionalProperties: type: string - description: 'Map of string keys and values that - can be used to organize and categorize (scope - and select) objects. May match selectors of replication - controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object type: object required: @@ -1407,37 +1775,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - lastTransitionTime @@ -1464,47 +1832,70 @@ spec: a variable which appears in the status of a ClusterClass. 
properties: from: - description: From specifies the origin of the variable - definition. This will be `inline` for variables defined - in the ClusterClass or the name of a patch defined in - the ClusterClass for variables discovered from a DiscoverVariables - runtime extensions. + description: |- + From specifies the origin of the variable definition. + This will be `inline` for variables defined in the ClusterClass or the name of a patch defined in the ClusterClass + for variables discovered from a DiscoverVariables runtime extensions. type: string + metadata: + description: |- + Metadata is the metadata of a variable. + It can be used to add additional data for higher level tools to + a ClusterClassVariable. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map that can be used to store and + retrieve arbitrary metadata. + They are not queryable. + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) variables. + type: object + type: object required: - description: 'Required specifies if the variable is required. - Note: this applies to the variable as a whole and thus - the top-level object defined in the schema. If nested - fields are required, this will be specified inside the - schema.' + description: |- + Required specifies if the variable is required. + Note: this applies to the variable as a whole and thus the + top-level object defined in the schema. If nested fields are + required, this will be specified inside the schema. type: boolean schema: description: Schema defines the schema of the variable. properties: openAPIV3Schema: - description: OpenAPIV3Schema defines the schema of - a variable via OpenAPI v3 schema. The schema is - a subset of the schema used in Kubernetes CRDs. + description: |- + OpenAPIV3Schema defines the schema of a variable via OpenAPI v3 + schema. The schema is a subset of the schema used in + Kubernetes CRDs. properties: additionalProperties: - description: 'AdditionalProperties specifies the - schema of values in a map (keys are always strings). - NOTE: Can only be set if type is object. NOTE: - AdditionalProperties is mutually exclusive with - Properties. NOTE: This field uses PreserveUnknownFields - and Schemaless, because recursive validation - is not possible.' + description: |- + AdditionalProperties specifies the schema of values in a map (keys are always strings). + NOTE: Can only be set if type is object. + NOTE: AdditionalProperties is mutually exclusive with Properties. + NOTE: This field uses PreserveUnknownFields and Schemaless, + because recursive validation is not possible. x-kubernetes-preserve-unknown-fields: true default: - description: 'Default is the default value of - the variable. NOTE: Can be set for all types.' + description: |- + Default is the default value of the variable. + NOTE: Can be set for all types. x-kubernetes-preserve-unknown-fields: true description: description: Description is a human-readable description of this variable. type: string enum: - description: 'Enum is the list of valid values - of the variable. NOTE: Can be set for all types.' + description: |- + Enum is the list of valid values of the variable. + NOTE: Can be set for all types. items: x-kubernetes-preserve-unknown-fields: true type: array @@ -1512,108 +1903,104 @@ spec: description: Example is an example for this variable. 
x-kubernetes-preserve-unknown-fields: true exclusiveMaximum: - description: 'ExclusiveMaximum specifies if the - Maximum is exclusive. NOTE: Can only be set - if type is integer or number.' + description: |- + ExclusiveMaximum specifies if the Maximum is exclusive. + NOTE: Can only be set if type is integer or number. type: boolean exclusiveMinimum: - description: 'ExclusiveMinimum specifies if the - Minimum is exclusive. NOTE: Can only be set - if type is integer or number.' + description: |- + ExclusiveMinimum specifies if the Minimum is exclusive. + NOTE: Can only be set if type is integer or number. type: boolean format: - description: 'Format is an OpenAPI v3 format string. - Unknown formats are ignored. For a list of supported - formats please see: (of the k8s.io/apiextensions-apiserver - version we''re currently using) https://github.com/kubernetes/apiextensions-apiserver/blob/master/pkg/apiserver/validation/formats.go - NOTE: Can only be set if type is string.' + description: |- + Format is an OpenAPI v3 format string. Unknown formats are ignored. + For a list of supported formats please see: (of the k8s.io/apiextensions-apiserver version we're currently using) + https://github.com/kubernetes/apiextensions-apiserver/blob/master/pkg/apiserver/validation/formats.go + NOTE: Can only be set if type is string. type: string items: - description: 'Items specifies fields of an array. - NOTE: Can only be set if type is array. NOTE: - This field uses PreserveUnknownFields and Schemaless, - because recursive validation is not possible.' + description: |- + Items specifies fields of an array. + NOTE: Can only be set if type is array. + NOTE: This field uses PreserveUnknownFields and Schemaless, + because recursive validation is not possible. x-kubernetes-preserve-unknown-fields: true maxItems: - description: 'MaxItems is the max length of an - array variable. NOTE: Can only be set if type - is array.' + description: |- + MaxItems is the max length of an array variable. + NOTE: Can only be set if type is array. format: int64 type: integer maxLength: - description: 'MaxLength is the max length of a - string variable. NOTE: Can only be set if type - is string.' + description: |- + MaxLength is the max length of a string variable. + NOTE: Can only be set if type is string. format: int64 type: integer maximum: - description: 'Maximum is the maximum of an integer - or number variable. If ExclusiveMaximum is false, - the variable is valid if it is lower than, or - equal to, the value of Maximum. If ExclusiveMaximum - is true, the variable is valid if it is strictly - lower than the value of Maximum. NOTE: Can only - be set if type is integer or number.' + description: |- + Maximum is the maximum of an integer or number variable. + If ExclusiveMaximum is false, the variable is valid if it is lower than, or equal to, the value of Maximum. + If ExclusiveMaximum is true, the variable is valid if it is strictly lower than the value of Maximum. + NOTE: Can only be set if type is integer or number. format: int64 type: integer minItems: - description: 'MinItems is the min length of an - array variable. NOTE: Can only be set if type - is array.' + description: |- + MinItems is the min length of an array variable. + NOTE: Can only be set if type is array. format: int64 type: integer minLength: - description: 'MinLength is the min length of a - string variable. NOTE: Can only be set if type - is string.' + description: |- + MinLength is the min length of a string variable. 
+ NOTE: Can only be set if type is string. format: int64 type: integer minimum: - description: 'Minimum is the minimum of an integer - or number variable. If ExclusiveMinimum is false, - the variable is valid if it is greater than, - or equal to, the value of Minimum. If ExclusiveMinimum - is true, the variable is valid if it is strictly - greater than the value of Minimum. NOTE: Can - only be set if type is integer or number.' + description: |- + Minimum is the minimum of an integer or number variable. + If ExclusiveMinimum is false, the variable is valid if it is greater than, or equal to, the value of Minimum. + If ExclusiveMinimum is true, the variable is valid if it is strictly greater than the value of Minimum. + NOTE: Can only be set if type is integer or number. format: int64 type: integer pattern: - description: 'Pattern is the regex which a string - variable must match. NOTE: Can only be set if - type is string.' + description: |- + Pattern is the regex which a string variable must match. + NOTE: Can only be set if type is string. type: string properties: - description: 'Properties specifies fields of an - object. NOTE: Can only be set if type is object. - NOTE: Properties is mutually exclusive with - AdditionalProperties. NOTE: This field uses - PreserveUnknownFields and Schemaless, because - recursive validation is not possible.' + description: |- + Properties specifies fields of an object. + NOTE: Can only be set if type is object. + NOTE: Properties is mutually exclusive with AdditionalProperties. + NOTE: This field uses PreserveUnknownFields and Schemaless, + because recursive validation is not possible. x-kubernetes-preserve-unknown-fields: true required: - description: 'Required specifies which fields - of an object are required. NOTE: Can only be - set if type is object.' + description: |- + Required specifies which fields of an object are required. + NOTE: Can only be set if type is object. items: type: string type: array type: - description: 'Type is the type of the variable. - Valid values are: object, array, string, integer, - number or boolean.' + description: |- + Type is the type of the variable. + Valid values are: object, array, string, integer, number or boolean. type: string uniqueItems: - description: 'UniqueItems specifies if items in - an array must be unique. NOTE: Can only be set - if type is array.' + description: |- + UniqueItems specifies if items in an array must be unique. + NOTE: Can only be set if type is array. type: boolean x-kubernetes-preserve-unknown-fields: - description: XPreserveUnknownFields allows setting - fields in a variable object which are not defined - in the variable schema. This affects fields - recursively, except if nested properties or - additionalProperties are specified in the schema. + description: |- + XPreserveUnknownFields allows setting fields in a variable object + which are not defined in the variable schema. This affects fields recursively, + except if nested properties or additionalProperties are specified in the schema. 
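# For illustration only: a hedged sketch of a variable definition exercising the
# OpenAPI v3 schema subset described above, placed under ClusterClass spec. The
# variable name and constraints are illustrative, and the `name` field plus the
# spec.variables placement are assumptions not excerpted in this hunk.
variables:
- name: imageRepository
  required: true
  schema:
    openAPIV3Schema:
      type: string
      default: registry.k8s.io
      example: registry.k8s.io
      maxLength: 256
      pattern: '^[a-z0-9./-]+$'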
type: boolean required: - type diff --git a/config/crd/bases/cluster.x-k8s.io_clusters.yaml b/config/crd/bases/cluster.x-k8s.io_clusters.yaml index 0f5d6fd43c69..ad966e7c54ee 100644 --- a/config/crd/bases/cluster.x-k8s.io_clusters.yaml +++ b/config/crd/bases/cluster.x-k8s.io_clusters.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.15.0 name: clusters.cluster.x-k8s.io spec: group: cluster.x-k8s.io @@ -24,20 +23,26 @@ spec: jsonPath: .status.phase name: Phase type: string + deprecated: true name: v1alpha3 schema: openAPIV3Schema: description: Cluster is the Schema for the clusters API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -48,8 +53,9 @@ spec: description: Cluster network configuration. properties: apiServerPort: - description: APIServerPort specifies the port the API Server should - bind to. Defaults to 6443. + description: |- + APIServerPort specifies the port the API Server should bind to. + Defaults to 6443. format: int32 type: integer pods: @@ -92,80 +98,94 @@ spec: - port type: object controlPlaneRef: - description: ControlPlaneRef is an optional reference to a provider-specific - resource that holds the details for provisioning the Control Plane - for a Cluster. + description: |- + ControlPlaneRef is an optional reference to a provider-specific resource that holds + the details for provisioning the Control Plane for a Cluster. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead of - an entire object, this string should contain a valid JSON/Go - field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part of - an object. 
TODO: this design is not final and this field is - subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic infrastructureRef: - description: InfrastructureRef is a reference to a provider-specific - resource that holds the details for provisioning infrastructure - for a cluster in said provider. + description: |- + InfrastructureRef is a reference to a provider-specific resource that holds the details + for provisioning infrastructure for a cluster in said provider. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead of - an entire object, this string should contain a valid JSON/Go - field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part of - an object. 
TODO: this design is not final and this field is - subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic @@ -184,37 +204,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. 
+ The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - status @@ -230,9 +250,9 @@ spec: type: boolean failureDomains: additionalProperties: - description: FailureDomainSpec is the Schema for Cluster API failure - domains. It allows controllers to understand how many failure - domains a cluster can optionally span across. + description: |- + FailureDomainSpec is the Schema for Cluster API failure domains. + It allows controllers to understand how many failure domains a cluster can optionally span across. properties: attributes: additionalProperties: @@ -249,13 +269,15 @@ spec: from the infrastructure provider. type: object failureMessage: - description: FailureMessage indicates that there is a fatal problem - reconciling the state, and will be set to a descriptive error message. + description: |- + FailureMessage indicates that there is a fatal problem reconciling the + state, and will be set to a descriptive error message. type: string failureReason: - description: FailureReason indicates that there is a fatal problem - reconciling the state, and will be set to a token value suitable - for programmatic interpretation. + description: |- + FailureReason indicates that there is a fatal problem reconciling the + state, and will be set to a token value suitable for + programmatic interpretation. type: string infrastructureReady: description: InfrastructureReady is the state of the infrastructure @@ -267,12 +289,13 @@ spec: format: int64 type: integer phase: - description: Phase represents the current phase of cluster actuation. + description: |- + Phase represents the current phase of cluster actuation. E.g. Pending, Running, Terminating, Failed etc. type: string type: object type: object - served: true + served: false storage: false subresources: status: {} @@ -285,21 +308,30 @@ spec: jsonPath: .status.phase name: Phase type: string + deprecated: true name: v1alpha4 schema: openAPIV3Schema: - description: "Cluster is the Schema for the clusters API. \n Deprecated: This - type will be removed in one of the next releases." + description: |- + Cluster is the Schema for the clusters API. + + + Deprecated: This type will be removed in one of the next releases. 
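# For illustration only: a hedged example of a status condition following the
# Condition shape described above; all values are illustrative.
conditions:
- type: Ready                          # CamelCase, or foo.example.com/CamelCase
  status: "False"                      # True, False or Unknown
  severity: Warning                    # set only when status is False
  reason: ScalingUp                    # CamelCase reason for the last transition
  message: Scaling up control plane to 3 replicas (actual 1)
  lastTransitionTime: "2024-01-01T00:00:00Z"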
properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -310,8 +342,9 @@ spec: description: Cluster network configuration. properties: apiServerPort: - description: APIServerPort specifies the port the API Server should - bind to. Defaults to 6443. + description: |- + APIServerPort specifies the port the API Server should bind to. + Defaults to 6443. format: int32 type: integer pods: @@ -354,80 +387,94 @@ spec: - port type: object controlPlaneRef: - description: ControlPlaneRef is an optional reference to a provider-specific - resource that holds the details for provisioning the Control Plane - for a Cluster. + description: |- + ControlPlaneRef is an optional reference to a provider-specific resource that holds + the details for provisioning the Control Plane for a Cluster. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead of - an entire object, this string should contain a valid JSON/Go - field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part of - an object. TODO: this design is not final and this field is - subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. 
type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic infrastructureRef: - description: InfrastructureRef is a reference to a provider-specific - resource that holds the details for provisioning infrastructure - for a cluster in said provider. + description: |- + InfrastructureRef is a reference to a provider-specific resource that holds the details + for provisioning infrastructure for a cluster in said provider. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead of - an entire object, this string should contain a valid JSON/Go - field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part of - an object. TODO: this design is not final and this field is - subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. 
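# For illustration only: a hedged sketch of the controlPlaneRef/infrastructureRef
# and clusterNetwork fields described above, shown with the current v1beta1
# apiVersion (the same fields exist on the deprecated versions in these hunks).
# The object names are hypothetical; KubeadmControlPlane/DockerCluster are the
# provider kinds used in the Cluster API quickstart.
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  name: my-cluster
spec:
  clusterNetwork:
    apiServerPort: 6443                # the default when unset
    pods:
      cidrBlocks: ["192.168.0.0/16"]
  controlPlaneRef:
    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
    kind: KubeadmControlPlane
    name: my-cluster-control-plane
  infrastructureRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
    kind: DockerCluster
    name: my-cluster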
type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic @@ -436,10 +483,11 @@ spec: the Cluster and all its associated objects. type: boolean topology: - description: 'This encapsulates the topology for the cluster. NOTE: - It is required to enable the ClusterTopology feature gate flag to - activate managed topologies support; this feature is highly experimental, - and parts of it might still be not implemented.' + description: |- + This encapsulates the topology for the cluster. + NOTE: It is required to enable the ClusterTopology + feature gate flag to activate managed topologies support; + this feature is highly experimental, and parts of it might still be not implemented. properties: class: description: The name of the ClusterClass object to create the @@ -449,108 +497,106 @@ spec: description: ControlPlane describes the cluster control plane. properties: metadata: - description: "Metadata is the metadata applied to the machines - of the ControlPlane. At runtime this metadata is merged - with the corresponding metadata from the ClusterClass. \n - This field is supported if and only if the control plane - provider template referenced in the ClusterClass is Machine - based." + description: |- + Metadata is the metadata applied to the machines of the ControlPlane. + At runtime this metadata is merged with the corresponding metadata from the ClusterClass. + + + This field is supported if and only if the control plane provider template + referenced in the ClusterClass is Machine based. properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key value - map stored with a resource that may be set by external - tools to store and retrieve arbitrary metadata. They - are not queryable and should be preserved when modifying - objects. 
More info: http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations type: object labels: additionalProperties: type: string - description: 'Map of string keys and values that can be - used to organize and categorize (scope and select) objects. - May match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object type: object replicas: - description: Replicas is the number of control plane nodes. - If the value is nil, the ControlPlane object is created - without the number of Replicas and it's assumed that the - control plane controller does not implement support for - this field. When specified against a control plane provider - that lacks support for this field, this value will be ignored. + description: |- + Replicas is the number of control plane nodes. + If the value is nil, the ControlPlane object is created without the number of Replicas + and it's assumed that the control plane controller does not implement support for this field. + When specified against a control plane provider that lacks support for this field, this value will be ignored. format: int32 type: integer type: object rolloutAfter: - description: RolloutAfter performs a rollout of the entire cluster - one component at a time, control plane first and then machine - deployments. + description: |- + RolloutAfter performs a rollout of the entire cluster one component at a time, + control plane first and then machine deployments. format: date-time type: string version: description: The Kubernetes version of the cluster. type: string workers: - description: Workers encapsulates the different constructs that - form the worker nodes for the cluster. + description: |- + Workers encapsulates the different constructs that form the worker nodes + for the cluster. properties: machineDeployments: description: MachineDeployments is a list of machine deployments in the cluster. items: - description: MachineDeploymentTopology specifies the different - parameters for a set of worker nodes in the topology. - This set of nodes is managed by a MachineDeployment object - whose lifecycle is managed by the Cluster controller. + description: |- + MachineDeploymentTopology specifies the different parameters for a set of worker nodes in the topology. + This set of nodes is managed by a MachineDeployment object whose lifecycle is managed by the Cluster controller. properties: class: - description: Class is the name of the MachineDeploymentClass - used to create the set of worker nodes. This should - match one of the deployment classes defined in the - ClusterClass object mentioned in the `Cluster.Spec.Class` - field. + description: |- + Class is the name of the MachineDeploymentClass used to create the set of worker nodes. + This should match one of the deployment classes defined in the ClusterClass object + mentioned in the `Cluster.Spec.Class` field. 
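+ # Editorial aside: a minimal sketch of the topology fields documented above,
+ # assuming a hypothetical ClusterClass named "my-clusterclass":
+ #   spec:
+ #     topology:
+ #       class: my-clusterclass
+ #       version: v1.27.3
+ #       controlPlane:
+ #         replicas: 3
+ #       workers:
+ #         machineDeployments:
+ #         - class: default-worker   # must match a class defined in the ClusterClass
+ #           name: md-0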
type: string metadata: - description: Metadata is the metadata applied to the - machines of the MachineDeployment. At runtime this - metadata is merged with the corresponding metadata - from the ClusterClass. + description: |- + Metadata is the metadata applied to the machines of the MachineDeployment. + At runtime this metadata is merged with the corresponding metadata from the ClusterClass. properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key - value map stored with a resource that may be set - by external tools to store and retrieve arbitrary - metadata. They are not queryable and should be - preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations type: object labels: additionalProperties: type: string - description: 'Map of string keys and values that - can be used to organize and categorize (scope - and select) objects. May match selectors of replication - controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object type: object name: - description: Name is the unique identifier for this - MachineDeploymentTopology. The value is used with - other unique identifiers to create a MachineDeployment's - Name (e.g. cluster's name, etc). In case the name - is greater than the allowed maximum length, the values - are hashed together. + description: |- + Name is the unique identifier for this MachineDeploymentTopology. + The value is used with other unique identifiers to create a MachineDeployment's Name + (e.g. cluster's name, etc). In case the name is greater than the allowed maximum length, + the values are hashed together. type: string replicas: - description: Replicas is the number of worker nodes - belonging to this set. If the value is nil, the MachineDeployment - is created without the number of Replicas (defaulting - to zero) and it's assumed that an external entity - (like cluster autoscaler) is responsible for the management + description: |- + Replicas is the number of worker nodes belonging to this set. + If the value is nil, the MachineDeployment is created without the number of Replicas (defaulting to zero) + and it's assumed that an external entity (like cluster autoscaler) is responsible for the management of this value. format: int32 type: integer @@ -575,37 +621,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. 
+ description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - status @@ -617,9 +663,9 @@ spec: type: boolean failureDomains: additionalProperties: - description: FailureDomainSpec is the Schema for Cluster API failure - domains. It allows controllers to understand how many failure - domains a cluster can optionally span across. + description: |- + FailureDomainSpec is the Schema for Cluster API failure domains. + It allows controllers to understand how many failure domains a cluster can optionally span across. properties: attributes: additionalProperties: @@ -636,13 +682,15 @@ spec: from the infrastructure provider. type: object failureMessage: - description: FailureMessage indicates that there is a fatal problem - reconciling the state, and will be set to a descriptive error message. + description: |- + FailureMessage indicates that there is a fatal problem reconciling the + state, and will be set to a descriptive error message. type: string failureReason: - description: FailureReason indicates that there is a fatal problem - reconciling the state, and will be set to a token value suitable - for programmatic interpretation. + description: |- + FailureReason indicates that there is a fatal problem reconciling the + state, and will be set to a token value suitable for + programmatic interpretation. type: string infrastructureReady: description: InfrastructureReady is the state of the infrastructure @@ -654,16 +702,22 @@ spec: format: int64 type: integer phase: - description: Phase represents the current phase of cluster actuation. + description: |- + Phase represents the current phase of cluster actuation. E.g. Pending, Running, Terminating, Failed etc. 
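+ # Editorial aside: for illustration, a status that conforms to the condition
+ # and phase fields above might read (values are hypothetical):
+ #   status:
+ #     phase: Provisioned
+ #     infrastructureReady: true
+ #     conditions:
+ #     - type: Ready
+ #       status: "True"
+ #       lastTransitionTime: "2024-01-01T00:00:00Z"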
type: string type: object type: object - served: true + served: false storage: false subresources: status: {} - additionalPrinterColumns: + - description: ClusterClass of this Cluster, empty if the Cluster is not using + a ClusterClass + jsonPath: .spec.topology.class + name: ClusterClass + type: string - description: Cluster status such as Pending/Provisioning/Provisioned/Deleting/Failed jsonPath: .status.phase name: Phase @@ -682,14 +736,19 @@ spec: description: Cluster is the Schema for the clusters API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -700,8 +759,9 @@ spec: description: Cluster network configuration. properties: apiServerPort: - description: APIServerPort specifies the port the API Server should - bind to. Defaults to 6443. + description: |- + APIServerPort specifies the port the API Server should bind to. + Defaults to 6443. format: int32 type: integer pods: @@ -744,80 +804,94 @@ spec: - port type: object controlPlaneRef: - description: ControlPlaneRef is an optional reference to a provider-specific - resource that holds the details for provisioning the Control Plane - for a Cluster. + description: |- + ControlPlaneRef is an optional reference to a provider-specific resource that holds + the details for provisioning the Control Plane for a Cluster. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead of - an entire object, this string should contain a valid JSON/Go - field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part of - an object. TODO: this design is not final and this field is - subject to change in the future.' 
+ description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic infrastructureRef: - description: InfrastructureRef is a reference to a provider-specific - resource that holds the details for provisioning infrastructure - for a cluster in said provider. + description: |- + InfrastructureRef is a reference to a provider-specific resource that holds the details + for provisioning infrastructure for a cluster in said provider. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead of - an entire object, this string should contain a valid JSON/Go - field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part of - an object. TODO: this design is not final and this field is - subject to change in the future.' 
+ description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic @@ -826,10 +900,11 @@ spec: the Cluster and all its associated objects. type: boolean topology: - description: 'This encapsulates the topology for the cluster. NOTE: - It is required to enable the ClusterTopology feature gate flag to - activate managed topologies support; this feature is highly experimental, - and parts of it might still be not implemented.' + description: |- + This encapsulates the topology for the cluster. + NOTE: It is required to enable the ClusterTopology + feature gate flag to activate managed topologies support; + this feature is highly experimental, and parts of it might still be not implemented. properties: class: description: The name of the ClusterClass object to create the @@ -839,92 +914,100 @@ spec: description: ControlPlane describes the cluster control plane. properties: machineHealthCheck: - description: MachineHealthCheck allows to enable, disable - and override the MachineHealthCheck configuration in the - ClusterClass for this control plane. + description: |- + MachineHealthCheck allows to enable, disable and override + the MachineHealthCheck configuration in the ClusterClass for this control plane. 
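+ # Editorial aside: a sketch of this override in a Cluster manifest, enabling
+ # a MachineHealthCheck for the control plane (thresholds are hypothetical):
+ #   topology:
+ #     controlPlane:
+ #       machineHealthCheck:
+ #         enable: true
+ #         maxUnhealthy: 40%
+ #         nodeStartupTimeout: 10m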
properties: enable: - description: "Enable controls if a MachineHealthCheck - should be created for the target machines. \n If false: - No MachineHealthCheck will be created. \n If not set(default): - A MachineHealthCheck will be created if it is defined - here or in the associated ClusterClass. If no MachineHealthCheck - is defined then none will be created. \n If true: A - MachineHealthCheck is guaranteed to be created. Cluster - validation will block if `enable` is true and no MachineHealthCheck - definition is available." + description: |- + Enable controls if a MachineHealthCheck should be created for the target machines. + + + If false: No MachineHealthCheck will be created. + + + If not set(default): A MachineHealthCheck will be created if it is defined here or + in the associated ClusterClass. If no MachineHealthCheck is defined then none will be created. + + + If true: A MachineHealthCheck is guaranteed to be created. Cluster validation will + block if `enable` is true and no MachineHealthCheck definition is available. type: boolean maxUnhealthy: anyOf: - type: integer - type: string - description: Any further remediation is only allowed if - at most "MaxUnhealthy" machines selected by "selector" - are not healthy. + description: |- + Any further remediation is only allowed if at most "MaxUnhealthy" machines selected by + "selector" are not healthy. x-kubernetes-int-or-string: true nodeStartupTimeout: - description: Machines older than this duration without - a node will be considered to have failed and will be - remediated. If you wish to disable this feature, set - the value explicitly to 0. + description: |- + Machines older than this duration without a node will be considered to have + failed and will be remediated. + If you wish to disable this feature, set the value explicitly to 0. type: string remediationTemplate: - description: "RemediationTemplate is a reference to a - remediation template provided by an infrastructure provider. - \n This field is completely optional, when filled, the - MachineHealthCheck controller creates a new object from - the template referenced and hands off remediation of - the machine to a controller that lives outside of Cluster - API." + description: |- + RemediationTemplate is a reference to a remediation template + provided by an infrastructure provider. + + + This field is completely optional, when filled, the MachineHealthCheck controller + creates a new object from the template referenced and hands off remediation of the machine to + a controller that lives outside of Cluster API. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object - instead of an entire object, this string should - contain a valid JSON/Go field access statement, - such as desiredState.manifest.containers[2]. For - example, if the object reference is to a container - within a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container - that triggered the event) or if no container name - is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only - to have some well-defined way of referencing a part - of an object. TODO: this design is not final and - this field is subject to change in the future.' 
+ description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this - reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic unhealthyConditions: - description: UnhealthyConditions contains a list of the - conditions that determine whether a node is considered - unhealthy. The conditions are combined in a logical - OR, i.e. if any of the conditions is met, the node is - unhealthy. + description: |- + UnhealthyConditions contains a list of the conditions that determine + whether a node is considered unhealthy. The conditions are combined in a + logical OR, i.e. if any of the conditions is met, the node is unhealthy. items: - description: UnhealthyCondition represents a Node condition - type and value with a timeout specified as a duration. When - the named condition has been in the given status for - at least the timeout value, a node is considered unhealthy. + description: |- + UnhealthyCondition represents a Node condition type and value with a timeout + specified as a duration. When the named condition has been in the given + status for at least the timeout value, a node is considered unhealthy. 
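+ # Editorial aside: unhealthyConditions entries as described above are
+ # commonly written like this (the 300s timeout is a hypothetical choice):
+ #   unhealthyConditions:
+ #   - type: Ready
+ #     status: Unknown
+ #     timeout: 300s
+ #   - type: Ready
+ #     status: "False"
+ #     timeout: 300s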
properties: status: minLength: 1 @@ -941,106 +1024,105 @@ spec: type: object type: array unhealthyRange: - description: 'Any further remediation is only allowed - if the number of machines selected by "selector" as - not healthy is within the range of "UnhealthyRange". - Takes precedence over MaxUnhealthy. Eg. "[3-5]" - This - means that remediation will be allowed only when: (a) - there are at least 3 unhealthy machines (and) (b) there - are at most 5 unhealthy machines' + description: |- + Any further remediation is only allowed if the number of machines selected by "selector" as not healthy + is within the range of "UnhealthyRange". Takes precedence over MaxUnhealthy. + Eg. "[3-5]" - This means that remediation will be allowed only when: + (a) there are at least 3 unhealthy machines (and) + (b) there are at most 5 unhealthy machines pattern: ^\[[0-9]+-[0-9]+\]$ type: string type: object metadata: - description: Metadata is the metadata applied to the ControlPlane - and the Machines of the ControlPlane if the ControlPlaneTemplate - referenced by the ClusterClass is machine based. If not, - it is applied only to the ControlPlane. At runtime this - metadata is merged with the corresponding metadata from - the ClusterClass. + description: |- + Metadata is the metadata applied to the ControlPlane and the Machines of the ControlPlane + if the ControlPlaneTemplate referenced by the ClusterClass is machine based. If not, it + is applied only to the ControlPlane. + At runtime this metadata is merged with the corresponding metadata from the ClusterClass. properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key value - map stored with a resource that may be set by external - tools to store and retrieve arbitrary metadata. They - are not queryable and should be preserved when modifying - objects. More info: http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations type: object labels: additionalProperties: type: string - description: 'Map of string keys and values that can be - used to organize and categorize (scope and select) objects. - May match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object type: object nodeDeletionTimeout: - description: NodeDeletionTimeout defines how long the controller - will attempt to delete the Node that the Machine hosts after - the Machine is marked for deletion. A duration of 0 will - retry deletion indefinitely. Defaults to 10 seconds. + description: |- + NodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine + hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. + Defaults to 10 seconds. type: string nodeDrainTimeout: - description: 'NodeDrainTimeout is the total amount of time - that the controller will spend on draining a node. The default - value is 0, meaning that the node can be drained without - any time limitations. 
NOTE: NodeDrainTimeout is different - from `kubectl drain --timeout`' + description: |- + NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + The default value is 0, meaning that the node can be drained without any time limitations. + NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` type: string nodeVolumeDetachTimeout: - description: NodeVolumeDetachTimeout is the total amount of - time that the controller will spend on waiting for all volumes - to be detached. The default value is 0, meaning that the - volumes can be detached without any time limitations. + description: |- + NodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. type: string replicas: - description: Replicas is the number of control plane nodes. - If the value is nil, the ControlPlane object is created - without the number of Replicas and it's assumed that the - control plane controller does not implement support for - this field. When specified against a control plane provider - that lacks support for this field, this value will be ignored. + description: |- + Replicas is the number of control plane nodes. + If the value is nil, the ControlPlane object is created without the number of Replicas + and it's assumed that the control plane controller does not implement support for this field. + When specified against a control plane provider that lacks support for this field, this value will be ignored. format: int32 type: integer type: object rolloutAfter: - description: RolloutAfter performs a rollout of the entire cluster - one component at a time, control plane first and then machine - deployments. + description: |- + RolloutAfter performs a rollout of the entire cluster one component at a time, + control plane first and then machine deployments. + + + Deprecated: This field has no function and is going to be removed in the next apiVersion. format: date-time type: string variables: - description: Variables can be used to customize the Cluster through - patches. They must comply to the corresponding VariableClasses - defined in the ClusterClass. + description: |- + Variables can be used to customize the Cluster through + patches. They must comply to the corresponding + VariableClasses defined in the ClusterClass. items: - description: ClusterVariable can be used to customize the Cluster - through patches. Each ClusterVariable is associated with a + description: |- + ClusterVariable can be used to customize the Cluster through patches. Each ClusterVariable is associated with a Variable definition in the ClusterClass `status` variables. properties: definitionFrom: - description: 'DefinitionFrom specifies where the definition - of this Variable is from. DefinitionFrom is `inline` when - the definition is from the ClusterClass `.spec.variables` - or the name of a patch defined in the ClusterClass `.spec.patches` - where the patch is external and provides external variables. - This field is mandatory if the variable has `DefinitionsConflict: - true` in ClusterClass `status.variables[]`' + description: |- + DefinitionFrom specifies where the definition of this Variable is from. DefinitionFrom is `inline` when the + definition is from the ClusterClass `.spec.variables` or the name of a patch defined in the ClusterClass + `.spec.patches` where the patch is external and provides external variables. 
+ This field is mandatory if the variable has `DefinitionsConflict: true` in ClusterClass `status.variables[]` type: string name: description: Name of the variable. type: string value: - description: 'Value of the variable. Note: the value will - be validated against the schema of the corresponding ClusterClassVariable - from the ClusterClass. Note: We have to use apiextensionsv1.JSON - instead of a custom JSON type, because controller-tools - has a hard-coded schema for apiextensionsv1.JSON which - cannot be produced by another type via controller-tools, - i.e. it is not possible to have no type field. Ref: https://github.com/kubernetes-sigs/controller-tools/blob/d0e03a142d0ecdd5491593e941ee1d6b5d91dba6/pkg/crd/known_types.go#L106-L111' + description: |- + Value of the variable. + Note: the value will be validated against the schema of the corresponding ClusterClassVariable + from the ClusterClass. + Note: We have to use apiextensionsv1.JSON instead of a custom JSON type, because controller-tools has a + hard-coded schema for apiextensionsv1.JSON which cannot be produced by another type via controller-tools, + i.e. it is not possible to have no type field. + Ref: https://github.com/kubernetes-sigs/controller-tools/blob/d0e03a142d0ecdd5491593e941ee1d6b5d91dba6/pkg/crd/known_types.go#L106-L111 x-kubernetes-preserve-unknown-fields: true required: - name @@ -1051,125 +1133,124 @@ spec: description: The Kubernetes version of the cluster. type: string workers: - description: Workers encapsulates the different constructs that - form the worker nodes for the cluster. + description: |- + Workers encapsulates the different constructs that form the worker nodes + for the cluster. properties: machineDeployments: description: MachineDeployments is a list of machine deployments in the cluster. items: - description: MachineDeploymentTopology specifies the different - parameters for a set of worker nodes in the topology. - This set of nodes is managed by a MachineDeployment object - whose lifecycle is managed by the Cluster controller. + description: |- + MachineDeploymentTopology specifies the different parameters for a set of worker nodes in the topology. + This set of nodes is managed by a MachineDeployment object whose lifecycle is managed by the Cluster controller. properties: class: - description: Class is the name of the MachineDeploymentClass - used to create the set of worker nodes. This should - match one of the deployment classes defined in the - ClusterClass object mentioned in the `Cluster.Spec.Class` - field. + description: |- + Class is the name of the MachineDeploymentClass used to create the set of worker nodes. + This should match one of the deployment classes defined in the ClusterClass object + mentioned in the `Cluster.Spec.Class` field. type: string failureDomain: - description: FailureDomain is the failure domain the - machines will be created in. Must match a key in the - FailureDomains map stored on the cluster object. + description: |- + FailureDomain is the failure domain the machines will be created in. + Must match a key in the FailureDomains map stored on the cluster object. type: string machineHealthCheck: - description: MachineHealthCheck allows to enable, disable - and override the MachineHealthCheck configuration - in the ClusterClass for this MachineDeployment. + description: |- + MachineHealthCheck allows to enable, disable and override + the MachineHealthCheck configuration in the ClusterClass for this MachineDeployment. 
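+ # Editorial aside: the same override can be set per MachineDeployment, e.g.
+ # opting one deployment out of the ClusterClass default (names hypothetical):
+ #   workers:
+ #     machineDeployments:
+ #     - class: default-worker
+ #       name: md-0
+ #       machineHealthCheck:
+ #         enable: false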
properties: enable: - description: "Enable controls if a MachineHealthCheck - should be created for the target machines. \n + description: |- + Enable controls if a MachineHealthCheck should be created for the target machines. + + If false: No MachineHealthCheck will be created. - \n If not set(default): A MachineHealthCheck will - be created if it is defined here or in the associated - ClusterClass. If no MachineHealthCheck is defined - then none will be created. \n If true: A MachineHealthCheck - is guaranteed to be created. Cluster validation - will block if `enable` is true and no MachineHealthCheck - definition is available." + + + If not set(default): A MachineHealthCheck will be created if it is defined here or + in the associated ClusterClass. If no MachineHealthCheck is defined then none will be created. + + + If true: A MachineHealthCheck is guaranteed to be created. Cluster validation will + block if `enable` is true and no MachineHealthCheck definition is available. type: boolean maxUnhealthy: anyOf: - type: integer - type: string - description: Any further remediation is only allowed - if at most "MaxUnhealthy" machines selected by + description: |- + Any further remediation is only allowed if at most "MaxUnhealthy" machines selected by "selector" are not healthy. x-kubernetes-int-or-string: true nodeStartupTimeout: - description: Machines older than this duration without - a node will be considered to have failed and will - be remediated. If you wish to disable this feature, - set the value explicitly to 0. + description: |- + Machines older than this duration without a node will be considered to have + failed and will be remediated. + If you wish to disable this feature, set the value explicitly to 0. type: string remediationTemplate: - description: "RemediationTemplate is a reference - to a remediation template provided by an infrastructure - provider. \n This field is completely optional, - when filled, the MachineHealthCheck controller - creates a new object from the template referenced - and hands off remediation of the machine to a - controller that lives outside of Cluster API." + description: |- + RemediationTemplate is a reference to a remediation template + provided by an infrastructure provider. + + + This field is completely optional, when filled, the MachineHealthCheck controller + creates a new object from the template referenced and hands off remediation of the machine to + a controller that lives outside of Cluster API. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an - object instead of an entire object, this string - should contain a valid JSON/Go field access - statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to - a container within a pod, this would take - on a value like: "spec.containers{name}" (where - "name" refers to the name of the container - that triggered the event) or if no container - name is specified "spec.containers[2]" (container - with index 2 in this pod). This syntax is - chosen only to have some well-defined way - of referencing a part of an object. TODO: - this design is not final and this field is - subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
+ For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which - this reference is made, if any. More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic unhealthyConditions: - description: UnhealthyConditions contains a list - of the conditions that determine whether a node - is considered unhealthy. The conditions are combined - in a logical OR, i.e. if any of the conditions - is met, the node is unhealthy. + description: |- + UnhealthyConditions contains a list of the conditions that determine + whether a node is considered unhealthy. The conditions are combined in a + logical OR, i.e. if any of the conditions is met, the node is unhealthy. items: - description: UnhealthyCondition represents a Node - condition type and value with a timeout specified - as a duration. When the named condition has - been in the given status for at least the timeout - value, a node is considered unhealthy. + description: |- + UnhealthyCondition represents a Node condition type and value with a timeout + specified as a duration. When the named condition has been in the given + status for at least the timeout value, a node is considered unhealthy. properties: status: minLength: 1 @@ -1186,101 +1267,94 @@ spec: type: object type: array unhealthyRange: - description: 'Any further remediation is only allowed - if the number of machines selected by "selector" - as not healthy is within the range of "UnhealthyRange". - Takes precedence over MaxUnhealthy. Eg. 
"[3-5]" - - This means that remediation will be allowed - only when: (a) there are at least 3 unhealthy - machines (and) (b) there are at most 5 unhealthy - machines' + description: |- + Any further remediation is only allowed if the number of machines selected by "selector" as not healthy + is within the range of "UnhealthyRange". Takes precedence over MaxUnhealthy. + Eg. "[3-5]" - This means that remediation will be allowed only when: + (a) there are at least 3 unhealthy machines (and) + (b) there are at most 5 unhealthy machines pattern: ^\[[0-9]+-[0-9]+\]$ type: string type: object metadata: - description: Metadata is the metadata applied to the - MachineDeployment and the machines of the MachineDeployment. - At runtime this metadata is merged with the corresponding - metadata from the ClusterClass. + description: |- + Metadata is the metadata applied to the MachineDeployment and the machines of the MachineDeployment. + At runtime this metadata is merged with the corresponding metadata from the ClusterClass. properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key - value map stored with a resource that may be set - by external tools to store and retrieve arbitrary - metadata. They are not queryable and should be - preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations type: object labels: additionalProperties: type: string - description: 'Map of string keys and values that - can be used to organize and categorize (scope - and select) objects. May match selectors of replication - controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object type: object minReadySeconds: - description: Minimum number of seconds for which a newly - created machine should be ready. Defaults to 0 (machine - will be considered available as soon as it is ready) + description: |- + Minimum number of seconds for which a newly created machine should + be ready. + Defaults to 0 (machine will be considered available as soon as it + is ready) format: int32 type: integer name: - description: Name is the unique identifier for this - MachineDeploymentTopology. The value is used with - other unique identifiers to create a MachineDeployment's - Name (e.g. cluster's name, etc). In case the name - is greater than the allowed maximum length, the values - are hashed together. + description: |- + Name is the unique identifier for this MachineDeploymentTopology. + The value is used with other unique identifiers to create a MachineDeployment's Name + (e.g. cluster's name, etc). In case the name is greater than the allowed maximum length, + the values are hashed together. type: string nodeDeletionTimeout: - description: NodeDeletionTimeout defines how long the - controller will attempt to delete the Node that the - Machine hosts after the Machine is marked for deletion. - A duration of 0 will retry deletion indefinitely. 
+ description: |- + NodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine + hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds. type: string nodeDrainTimeout: - description: 'NodeDrainTimeout is the total amount of - time that the controller will spend on draining a - node. The default value is 0, meaning that the node - can be drained without any time limitations. NOTE: - NodeDrainTimeout is different from `kubectl drain - --timeout`' + description: |- + NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + The default value is 0, meaning that the node can be drained without any time limitations. + NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` type: string nodeVolumeDetachTimeout: - description: NodeVolumeDetachTimeout is the total amount - of time that the controller will spend on waiting - for all volumes to be detached. The default value - is 0, meaning that the volumes can be detached without - any time limitations. + description: |- + NodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. type: string replicas: - description: Replicas is the number of worker nodes - belonging to this set. If the value is nil, the MachineDeployment - is created without the number of Replicas (defaulting - to 1) and it's assumed that an external entity (like - cluster autoscaler) is responsible for the management + description: |- + Replicas is the number of worker nodes belonging to this set. + If the value is nil, the MachineDeployment is created without the number of Replicas (defaulting to 1) + and it's assumed that an external entity (like cluster autoscaler) is responsible for the management of this value. format: int32 type: integer strategy: - description: The deployment strategy to use to replace - existing machines with new ones. + description: |- + The deployment strategy to use to replace existing machines with + new ones. properties: rollingUpdate: - description: Rolling update config params. Present - only if MachineDeploymentStrategyType = RollingUpdate. + description: |- + Rolling update config params. Present only if + MachineDeploymentStrategyType = RollingUpdate. properties: deletePolicy: - description: DeletePolicy defines the policy - used by the MachineDeployment to identify - nodes to delete when downscaling. Valid values - are "Random, "Newest", "Oldest" When no value - is supplied, the default DeletePolicy of MachineSet - is used + description: |- + DeletePolicy defines the policy used by the MachineDeployment to identify nodes to delete when downscaling. + Valid values are "Random", "Newest", "Oldest" + When no value is supplied, the default DeletePolicy of MachineSet is used enum: - Random - Newest @@ -1290,46 +1364,44 @@ spec: anyOf: - type: integer - type: string - description: 'The maximum number of machines - that can be scheduled above the desired number - of machines. Value can be an absolute number - (ex: 5) or a percentage of desired machines - (ex: 10%). This can not be 0 if MaxUnavailable - is 0. Absolute number is calculated from percentage - by rounding up. Defaults to 1. 
Example: when - this is set to 30%, the new MachineSet can - be scaled up immediately when the rolling - update starts, such that the total number - of old and new machines do not exceed 130% - of desired machines. Once old machines have - been killed, new MachineSet can be scaled - up further, ensuring that total number of - machines running at any time during the update - is at most 130% of desired machines.' + description: |- + The maximum number of machines that can be scheduled above the + desired number of machines. + Value can be an absolute number (ex: 5) or a percentage of + desired machines (ex: 10%). + This can not be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up. + Defaults to 1. + Example: when this is set to 30%, the new MachineSet can be scaled + up immediately when the rolling update starts, such that the total + number of old and new machines do not exceed 130% of desired + machines. Once old machines have been killed, new MachineSet can + be scaled up further, ensuring that total number of machines running + at any time during the update is at most 130% of desired machines. x-kubernetes-int-or-string: true maxUnavailable: anyOf: - type: integer - type: string - description: 'The maximum number of machines - that can be unavailable during the update. - Value can be an absolute number (ex: 5) or - a percentage of desired machines (ex: 10%). - Absolute number is calculated from percentage - by rounding down. This can not be 0 if MaxSurge - is 0. Defaults to 0. Example: when this is - set to 30%, the old MachineSet can be scaled - down to 70% of desired machines immediately - when the rolling update starts. Once new machines - are ready, old MachineSet can be scaled down - further, followed by scaling up the new MachineSet, - ensuring that the total number of machines - available at all times during the update is - at least 70% of desired machines.' + description: |- + The maximum number of machines that can be unavailable during the update. + Value can be an absolute number (ex: 5) or a percentage of desired + machines (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 0. + Example: when this is set to 30%, the old MachineSet can be scaled + down to 70% of desired machines immediately when the rolling update + starts. Once new machines are ready, old MachineSet can be scaled + down further, followed by scaling up the new MachineSet, ensuring + that the total number of machines available at all times + during the update is at least 70% of desired machines. x-kubernetes-int-or-string: true type: object type: - description: Type of deployment. Default is RollingUpdate. + description: |- + Type of deployment. Allowed values are RollingUpdate and OnDelete. + The default is RollingUpdate. enum: - RollingUpdate - OnDelete @@ -1343,36 +1415,157 @@ spec: description: Overrides can be used to override Cluster level variables. items: - description: ClusterVariable can be used to customize - the Cluster through patches. Each ClusterVariable - is associated with a Variable definition in - the ClusterClass `status` variables. + description: |- + ClusterVariable can be used to customize the Cluster through patches. Each ClusterVariable is associated with a + Variable definition in the ClusterClass `status` variables. + properties: + definitionFrom: + description: |- + DefinitionFrom specifies where the definition of this Variable is from. 
DefinitionFrom is `inline` when the + definition is from the ClusterClass `.spec.variables` or the name of a patch defined in the ClusterClass + `.spec.patches` where the patch is external and provides external variables. + This field is mandatory if the variable has `DefinitionsConflict: true` in ClusterClass `status.variables[]` + type: string + name: + description: Name of the variable. + type: string + value: + description: |- + Value of the variable. + Note: the value will be validated against the schema of the corresponding ClusterClassVariable + from the ClusterClass. + Note: We have to use apiextensionsv1.JSON instead of a custom JSON type, because controller-tools has a + hard-coded schema for apiextensionsv1.JSON which cannot be produced by another type via controller-tools, + i.e. it is not possible to have no type field. + Ref: https://github.com/kubernetes-sigs/controller-tools/blob/d0e03a142d0ecdd5491593e941ee1d6b5d91dba6/pkg/crd/known_types.go#L106-L111 + x-kubernetes-preserve-unknown-fields: true + required: + - name + - value + type: object + type: array + type: object + required: + - class + - name + type: object + type: array + machinePools: + description: MachinePools is a list of machine pools in the + cluster. + items: + description: |- + MachinePoolTopology specifies the different parameters for a pool of worker nodes in the topology. + This pool of nodes is managed by a MachinePool object whose lifecycle is managed by the Cluster controller. + properties: + class: + description: |- + Class is the name of the MachinePoolClass used to create the pool of worker nodes. + This should match one of the deployment classes defined in the ClusterClass object + mentioned in the `Cluster.Spec.Class` field. + type: string + failureDomains: + description: |- + FailureDomains is the list of failure domains the machine pool will be created in. + Must match a key in the FailureDomains map stored on the cluster object. + items: + type: string + type: array + metadata: + description: |- + Metadata is the metadata applied to the MachinePool. + At runtime this metadata is merged with the corresponding metadata from the ClusterClass. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + type: object + minReadySeconds: + description: |- + Minimum number of seconds for which a newly created machine pool should + be ready. + Defaults to 0 (machine will be considered available as soon as it + is ready) + format: int32 + type: integer + name: + description: |- + Name is the unique identifier for this MachinePoolTopology. + The value is used with other unique identifiers to create a MachinePool's Name + (e.g. cluster's name, etc). In case the name is greater than the allowed maximum length, + the values are hashed together. 
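+ # Editorial aside: an illustrative machinePools entry using the fields
+ # documented above (class and name are hypothetical):
+ #   workers:
+ #     machinePools:
+ #     - class: default-pool
+ #       name: mp-0
+ #       replicas: 3
+ #       failureDomains:
+ #       - us-east-1a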
+ type: string + nodeDeletionTimeout: + description: |- + NodeDeletionTimeout defines how long the controller will attempt to delete the Node that the MachinePool + hosts after the MachinePool is marked for deletion. A duration of 0 will retry deletion indefinitely. + Defaults to 10 seconds. + type: string + nodeDrainTimeout: + description: |- + NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + The default value is 0, meaning that the node can be drained without any time limitations. + NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` + type: string + nodeVolumeDetachTimeout: + description: |- + NodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. + type: string + replicas: + description: |- + Replicas is the number of nodes belonging to this pool. + If the value is nil, the MachinePool is created without the number of Replicas (defaulting to 1) + and it's assumed that an external entity (like cluster autoscaler) is responsible for the management + of this value. + format: int32 + type: integer + variables: + description: Variables can be used to customize the + MachinePool through patches. + properties: + overrides: + description: Overrides can be used to override Cluster + level variables. + items: + description: |- + ClusterVariable can be used to customize the Cluster through patches. Each ClusterVariable is associated with a + Variable definition in the ClusterClass `status` variables. properties: definitionFrom: - description: 'DefinitionFrom specifies where - the definition of this Variable is from. - DefinitionFrom is `inline` when the definition - is from the ClusterClass `.spec.variables` - or the name of a patch defined in the ClusterClass - `.spec.patches` where the patch is external - and provides external variables. This field - is mandatory if the variable has `DefinitionsConflict: - true` in ClusterClass `status.variables[]`' + description: |- + DefinitionFrom specifies where the definition of this Variable is from. DefinitionFrom is `inline` when the + definition is from the ClusterClass `.spec.variables` or the name of a patch defined in the ClusterClass + `.spec.patches` where the patch is external and provides external variables. + This field is mandatory if the variable has `DefinitionsConflict: true` in ClusterClass `status.variables[]` type: string name: description: Name of the variable. type: string value: - description: 'Value of the variable. Note: - the value will be validated against the - schema of the corresponding ClusterClassVariable - from the ClusterClass. Note: We have to - use apiextensionsv1.JSON instead of a custom - JSON type, because controller-tools has - a hard-coded schema for apiextensionsv1.JSON - which cannot be produced by another type - via controller-tools, i.e. it is not possible - to have no type field. Ref: https://github.com/kubernetes-sigs/controller-tools/blob/d0e03a142d0ecdd5491593e941ee1d6b5d91dba6/pkg/crd/known_types.go#L106-L111' + description: |- + Value of the variable. + Note: the value will be validated against the schema of the corresponding ClusterClassVariable + from the ClusterClass. 
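To make the override mechanics concrete, a `variables.overrides` list such as the one defined above might be populated like this; the variable names and the external patch name are hypothetical:

```yaml
variables:
  overrides:
    - name: imageRepository              # hypothetical variable name
      definitionFrom: inline             # definition lives in ClusterClass .spec.variables
      value: registry.example.com/pool-specific
    - name: httpProxy                    # hypothetical variable name
      definitionFrom: my-external-patch  # hypothetical external patch providing the definition
      value:                             # value is free-form JSON, validated against the variable's schema
        url: http://proxy.example.com:3128
        noProxy: internal.example.com
```

Note that `definitionFrom` is only mandatory when the variable is marked with `DefinitionsConflict: true` in the ClusterClass status; it is shown on both entries here purely for illustration.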
+ Note: We have to use apiextensionsv1.JSON instead of a custom JSON type, because controller-tools has a + hard-coded schema for apiextensionsv1.JSON which cannot be produced by another type via controller-tools, + i.e. it is not possible to have no type field. + Ref: https://github.com/kubernetes-sigs/controller-tools/blob/d0e03a142d0ecdd5491593e941ee1d6b5d91dba6/pkg/crd/known_types.go#L106-L111 x-kubernetes-preserve-unknown-fields: true required: - name @@ -1401,37 +1594,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - lastTransitionTime @@ -1440,13 +1633,18 @@ spec: type: object type: array controlPlaneReady: - description: ControlPlaneReady defines if the control plane is ready. + description: |- + ControlPlaneReady denotes if the control plane became ready during initial provisioning + to receive requests. + NOTE: this field is part of the Cluster API contract and it is used to orchestrate provisioning. + The value of this field is never updated after provisioning is completed. Please use conditions + to check the operational state of the control plane. type: boolean failureDomains: additionalProperties: - description: FailureDomainSpec is the Schema for Cluster API failure - domains. 
It allows controllers to understand how many failure - domains a cluster can optionally span across. + description: |- + FailureDomainSpec is the Schema for Cluster API failure domains. + It allows controllers to understand how many failure domains a cluster can optionally span across. properties: attributes: additionalProperties: @@ -1463,13 +1661,15 @@ spec: from the infrastructure provider. type: object failureMessage: - description: FailureMessage indicates that there is a fatal problem - reconciling the state, and will be set to a descriptive error message. + description: |- + FailureMessage indicates that there is a fatal problem reconciling the + state, and will be set to a descriptive error message. type: string failureReason: - description: FailureReason indicates that there is a fatal problem - reconciling the state, and will be set to a token value suitable - for programmatic interpretation. + description: |- + FailureReason indicates that there is a fatal problem reconciling the + state, and will be set to a token value suitable for + programmatic interpretation. type: string infrastructureReady: description: InfrastructureReady is the state of the infrastructure @@ -1481,7 +1681,8 @@ spec: format: int64 type: integer phase: - description: Phase represents the current phase of cluster actuation. + description: |- + Phase represents the current phase of cluster actuation. E.g. Pending, Running, Terminating, Failed etc. type: string type: object diff --git a/config/crd/bases/cluster.x-k8s.io_machinedeployments.yaml b/config/crd/bases/cluster.x-k8s.io_machinedeployments.yaml index 38573e47507a..0dd0672d2cf4 100644 --- a/config/crd/bases/cluster.x-k8s.io_machinedeployments.yaml +++ b/config/crd/bases/cluster.x-k8s.io_machinedeployments.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.15.0 name: machinedeployments.cluster.x-k8s.io spec: group: cluster.x-k8s.io @@ -41,21 +40,30 @@ spec: jsonPath: .status.unavailableReplicas name: Unavailable type: integer + deprecated: true name: v1alpha3 schema: openAPIV3Schema: - description: "MachineDeployment is the Schema for the machinedeployments API. - \n Deprecated: This type will be removed in one of the next releases." + description: |- + MachineDeployment is the Schema for the machinedeployments API. + + + Deprecated: This type will be removed in one of the next releases. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -68,232 +76,265 @@ spec: minLength: 1 type: string minReadySeconds: - description: Minimum number of seconds for which a newly created machine - should be ready. Defaults to 0 (machine will be considered available - as soon as it is ready) + description: |- + Minimum number of seconds for which a newly created machine should + be ready. + Defaults to 0 (machine will be considered available as soon as it + is ready) format: int32 type: integer paused: description: Indicates that the deployment is paused. type: boolean progressDeadlineSeconds: - description: The maximum time in seconds for a deployment to make - progress before it is considered to be failed. The deployment controller - will continue to process failed deployments and a condition with - a ProgressDeadlineExceeded reason will be surfaced in the deployment - status. Note that progress will not be estimated during the time - a deployment is paused. Defaults to 600s. + description: |- + The maximum time in seconds for a deployment to make progress before it + is considered to be failed. The deployment controller will continue to + process failed deployments and a condition with a ProgressDeadlineExceeded + reason will be surfaced in the deployment status. Note that progress will + not be estimated during the time a deployment is paused. Defaults to 600s. format: int32 type: integer replicas: - description: Number of desired machines. Defaults to 1. This is a - pointer to distinguish between explicit zero and not specified. + description: |- + Number of desired machines. Defaults to 1. + This is a pointer to distinguish between explicit zero and not specified. format: int32 type: integer revisionHistoryLimit: - description: The number of old MachineSets to retain to allow rollback. + description: |- + The number of old MachineSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1. format: int32 type: integer selector: - description: Label selector for machines. Existing MachineSets whose - machines are selected by this will be the ones affected by this - deployment. It must match the machine template's labels. + description: |- + Label selector for machines. Existing MachineSets whose machines are + selected by this will be the ones affected by this deployment. + It must match the machine template's labels. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. 
+ description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic strategy: - description: The deployment strategy to use to replace existing machines - with new ones. + description: |- + The deployment strategy to use to replace existing machines with + new ones. properties: rollingUpdate: - description: Rolling update config params. Present only if MachineDeploymentStrategyType - = RollingUpdate. + description: |- + Rolling update config params. Present only if + MachineDeploymentStrategyType = RollingUpdate. properties: maxSurge: anyOf: - type: integer - type: string - description: 'The maximum number of machines that can be scheduled - above the desired number of machines. Value can be an absolute - number (ex: 5) or a percentage of desired machines (ex: - 10%). This can not be 0 if MaxUnavailable is 0. Absolute - number is calculated from percentage by rounding up. Defaults - to 1. Example: when this is set to 30%, the new MachineSet - can be scaled up immediately when the rolling update starts, - such that the total number of old and new machines do not - exceed 130% of desired machines. Once old machines have - been killed, new MachineSet can be scaled up further, ensuring - that total number of machines running at any time during - the update is at most 130% of desired machines.' + description: |- + The maximum number of machines that can be scheduled above the + desired number of machines. + Value can be an absolute number (ex: 5) or a percentage of + desired machines (ex: 10%). + This can not be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up. + Defaults to 1. + Example: when this is set to 30%, the new MachineSet can be scaled + up immediately when the rolling update starts, such that the total + number of old and new machines do not exceed 130% of desired + machines. Once old machines have been killed, new MachineSet can + be scaled up further, ensuring that total number of machines running + at any time during the update is at most 130% of desired machines. 
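As a worked example of the rounding rules in the maxSurge description above (and the matching maxUnavailable field), assume 10 desired replicas; the sketch below is illustrative, not taken from this diff:

```yaml
strategy:
  type: RollingUpdate
  rollingUpdate:
    maxSurge: 30%        # ceil(10 * 0.30) = 3 extra machines, so at most 13 in total mid-rollout
    maxUnavailable: 0    # 0 is legal here only because maxSurge is non-zero
```

Had `maxUnavailable: 30%` been used instead, it would round down to floor(10 * 0.30) = 3, guaranteeing at least 7 machines available throughout the update.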
x-kubernetes-int-or-string: true maxUnavailable: anyOf: - type: integer - type: string - description: 'The maximum number of machines that can be unavailable - during the update. Value can be an absolute number (ex: - 5) or a percentage of desired machines (ex: 10%). Absolute - number is calculated from percentage by rounding down. This - can not be 0 if MaxSurge is 0. Defaults to 0. Example: when - this is set to 30%, the old MachineSet can be scaled down - to 70% of desired machines immediately when the rolling - update starts. Once new machines are ready, old MachineSet - can be scaled down further, followed by scaling up the new - MachineSet, ensuring that the total number of machines available - at all times during the update is at least 70% of desired - machines.' + description: |- + The maximum number of machines that can be unavailable during the update. + Value can be an absolute number (ex: 5) or a percentage of desired + machines (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 0. + Example: when this is set to 30%, the old MachineSet can be scaled + down to 70% of desired machines immediately when the rolling update + starts. Once new machines are ready, old MachineSet can be scaled + down further, followed by scaling up the new MachineSet, ensuring + that the total number of machines available at all times + during the update is at least 70% of desired machines. x-kubernetes-int-or-string: true type: object type: - description: Type of deployment. Currently the only supported - strategy is "RollingUpdate". Default is RollingUpdate. + description: |- + Type of deployment. Currently the only supported strategy is + "RollingUpdate". + Default is RollingUpdate. type: string type: object template: description: Template describes the machines that will be created. properties: metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key value map - stored with a resource that may be set by external tools - to store and retrieve arbitrary metadata. They are not queryable - and should be preserved when modifying objects. More info: - http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations type: object generateName: - description: "GenerateName is an optional prefix, used by - the server, to generate a unique name ONLY IF the Name field - has not been provided. If this field is used, the name returned - to the client will be different than the name passed. This - value will also be combined with a unique suffix. The provided - value has the same validation rules as the Name field, and - may be truncated by the length of the suffix required to - make the value unique on the server. 
\n If this field is - specified and the generated name exists, the server will - NOT return a 409 - instead, it will either return 201 Created - or 500 with Reason ServerTimeout indicating a unique name - could not be found in the time allotted, and the client - should retry (optionally after the time indicated in the - Retry-After header). \n Applied only if Name is not specified. + description: |- + GenerateName is an optional prefix, used by the server, to generate a unique + name ONLY IF the Name field has not been provided. + If this field is used, the name returned to the client will be different + than the name passed. This value will also be combined with a unique suffix. + The provided value has the same validation rules as the Name field, + and may be truncated by the length of the suffix required to make the value + unique on the server. + + + If this field is specified and the generated name exists, the server will + NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + ServerTimeout indicating a unique name could not be found in the time allotted, and the client + should retry (optionally after the time indicated in the Retry-After header). + + + Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency - \n Deprecated: This field has no function and is going to - be removed in a next release." + + + Deprecated: This field has no function and is going to be removed in a next release. type: string labels: additionalProperties: type: string - description: 'Map of string keys and values that can be used - to organize and categorize (scope and select) objects. May - match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object name: - description: "Name must be unique within a namespace. Is required - when creating resources, although some resources may allow - a client to request the generation of an appropriate name - automatically. Name is primarily intended for creation idempotence - and configuration definition. Cannot be updated. More info: - http://kubernetes.io/docs/user-guide/identifiers#names \n - Deprecated: This field has no function and is going to be - removed in a next release." + description: |- + Name must be unique within a namespace. Is required when creating resources, although + some resources may allow a client to request the generation of an appropriate name + automatically. Name is primarily intended for creation idempotence and configuration + definition. + Cannot be updated. + More info: http://kubernetes.io/docs/user-guide/identifiers#names + + + Deprecated: This field has no function and is going to be removed in a next release. type: string namespace: - description: "Namespace defines the space within each name - must be unique. An empty namespace is equivalent to the - \"default\" namespace, but \"default\" is the canonical - representation. Not all objects are required to be scoped - to a namespace - the value of this field for those objects - will be empty. \n Must be a DNS_LABEL. Cannot be updated. + description: |- + Namespace defines the space within each name must be unique. 
An empty namespace is + equivalent to the "default" namespace, but "default" is the canonical representation. + Not all objects are required to be scoped to a namespace - the value of this field for + those objects will be empty. + + + Must be a DNS_LABEL. + Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces - \n Deprecated: This field has no function and is going to - be removed in a next release." + + + Deprecated: This field has no function and is going to be removed in a next release. type: string ownerReferences: - description: "List of objects depended by this object. If - ALL objects in the list have been deleted, this object will - be garbage collected. If this object is managed by a controller, - then an entry in this list will point to this controller, - with the controller field set to true. There cannot be more - than one managing controller. \n Deprecated: This field - has no function and is going to be removed in a next release." + description: |- + List of objects depended by this object. If ALL objects in the list have + been deleted, this object will be garbage collected. If this object is managed by a controller, + then an entry in this list will point to this controller, with the controller field set to true. + There cannot be more than one managing controller. + + + Deprecated: This field has no function and is going to be removed in a next release. items: - description: OwnerReference contains enough information - to let you identify an owning object. An owning object - must be in the same namespace as the dependent, or be - cluster-scoped, so there is no namespace field. + description: |- + OwnerReference contains enough information to let you identify an owning + object. An owning object must be in the same namespace as the dependent, or + be cluster-scoped, so there is no namespace field. properties: apiVersion: description: API version of the referent. type: string blockOwnerDeletion: - description: If true, AND if the owner has the "foregroundDeletion" - finalizer, then the owner cannot be deleted from the - key-value store until this reference is removed. See - https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion - for how the garbage collector interacts with this - field and enforces the foreground deletion. Defaults - to false. To set this field, a user needs "delete" - permission of the owner, otherwise 422 (Unprocessable - Entity) will be returned. + description: |- + If true, AND if the owner has the "foregroundDeletion" finalizer, then + the owner cannot be deleted from the key-value store until this + reference is removed. + See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion + for how the garbage collector interacts with this field and enforces the foreground deletion. + Defaults to false. + To set this field, a user needs "delete" permission of the owner, + otherwise 422 (Unprocessable Entity) will be returned. type: boolean controller: description: If true, this reference points to the managing controller. type: boolean kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names type: string uid: - description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids type: string required: - apiVersion @@ -305,67 +346,75 @@ spec: type: array type: object spec: - description: 'Specification of the desired behavior of the machine. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + description: |- + Specification of the desired behavior of the machine. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status properties: bootstrap: - description: Bootstrap is a reference to a local struct which - encapsulates fields to configure the Machine’s bootstrapping - mechanism. + description: |- + Bootstrap is a reference to a local struct which encapsulates + fields to configure the Machine’s bootstrapping mechanism. properties: configRef: - description: ConfigRef is a reference to a bootstrap provider-specific - resource that holds configuration details. The reference - is optional to allow users/operators to specify Bootstrap.Data - without the need of a controller. + description: |- + ConfigRef is a reference to a bootstrap provider-specific resource + that holds configuration details. The reference is optional to + allow users/operators to specify Bootstrap.Data without + the need of a controller. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object - instead of an entire object, this string should - contain a valid JSON/Go field access statement, - such as desiredState.manifest.containers[2]. For - example, if the object reference is to a container - within a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container - that triggered the event) or if no container name - is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only - to have some well-defined way of referencing a part - of an object. TODO: this design is not final and - this field is subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this - reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic data: - description: "Data contains the bootstrap data, such as - cloud-init details scripts. If nil, the Machine should - remain in the Pending state. \n Deprecated: Switch to - DataSecretName." + description: |- + Data contains the bootstrap data, such as cloud-init details scripts. + If nil, the Machine should remain in the Pending state. + + + Deprecated: Switch to DataSecretName. type: string dataSecretName: - description: DataSecretName is the name of the secret - that stores the bootstrap data script. If nil, the Machine - should remain in the Pending state. + description: |- + DataSecretName is the name of the secret that stores the bootstrap data script. + If nil, the Machine should remain in the Pending state. type: string type: object clusterName: @@ -374,76 +423,78 @@ spec: minLength: 1 type: string failureDomain: - description: FailureDomain is the failure domain the machine - will be created in. Must match a key in the FailureDomains - map stored on the cluster object. + description: |- + FailureDomain is the failure domain the machine will be created in. + Must match a key in the FailureDomains map stored on the cluster object. type: string infrastructureRef: - description: InfrastructureRef is a required reference to - a custom resource offered by an infrastructure provider. + description: |- + InfrastructureRef is a required reference to a custom resource + offered by an infrastructure provider. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead - of an entire object, this string should contain a valid - JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container - within a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that - triggered the event) or if no container name is specified - "spec.containers[2]" (container with index 2 in this - pod). 
This syntax is chosen only to have some well-defined - way of referencing a part of an object. TODO: this design - is not final and this field is subject to change in - the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic nodeDrainTimeout: - description: 'NodeDrainTimeout is the total amount of time - that the controller will spend on draining a node. The default - value is 0, meaning that the node can be drained without - any time limitations. NOTE: NodeDrainTimeout is different - from `kubectl drain --timeout`' + description: |- + NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + The default value is 0, meaning that the node can be drained without any time limitations. + NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` type: string providerID: - description: ProviderID is the identification ID of the machine - provided by the provider. This field must match the provider - ID as seen on the node object corresponding to this machine. - This field is required by higher level consumers of cluster-api. - Example use case is cluster autoscaler with cluster-api - as provider. 
Clean-up logic in the autoscaler compares machines - to nodes to find out machines at provider which could not - get registered as Kubernetes nodes. With cluster-api as - a generic out-of-tree provider for autoscaler, this field - is required by autoscaler to be able to have a provider - view of the list of machines. Another list of nodes is queried - from the k8s apiserver and then a comparison is done to - find out unregistered machines and are marked for delete. - This field will be set by the actuators and consumed by - higher level entities like autoscaler that will be interfacing - with cluster-api as generic provider. + description: |- + ProviderID is the identification ID of the machine provided by the provider. + This field must match the provider ID as seen on the node object corresponding to this machine. + This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler + with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out + machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a + generic out-of-tree provider for autoscaler, this field is required by autoscaler to be + able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver + and then a comparison is done to find out unregistered machines and are marked for delete. + This field will be set by the actuators and consumed by higher level entities like autoscaler that will + be interfacing with cluster-api as generic provider. type: string version: - description: Version defines the desired Kubernetes version. + description: |- + Version defines the desired Kubernetes version. This field is meant to be optionally used by bootstrap providers. type: string required: @@ -461,8 +512,9 @@ spec: description: MachineDeploymentStatus defines the observed state of MachineDeployment. properties: availableReplicas: - description: Total number of available machines (ready for at least - minReadySeconds) targeted by this deployment. + description: |- + Total number of available machines (ready for at least minReadySeconds) + targeted by this deployment. format: int32 type: integer observedGeneration: @@ -478,32 +530,35 @@ spec: format: int32 type: integer replicas: - description: Total number of non-terminated machines targeted by this - deployment (their labels match the selector). + description: |- + Total number of non-terminated machines targeted by this deployment + (their labels match the selector). format: int32 type: integer selector: - description: 'Selector is the same as the label selector but in the - string format to avoid introspection by clients. The string will - be in the same format as the query-param syntax. More info about - label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors' + description: |- + Selector is the same as the label selector but in the string format to avoid introspection + by clients. The string will be in the same format as the query-param syntax. + More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors type: string unavailableReplicas: - description: Total number of unavailable machines targeted by this - deployment. This is the total number of machines that are still - required for the deployment to have 100% available capacity. They - may either be machines that are running but not yet available or - machines that still have not been created. 
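Reading the status counters in this hunk together, a hypothetical mid-rollout status for a deployment of 10 desired machines could look like the sketch below; every number is invented, chosen only to show how the fields relate:

```yaml
status:
  replicas: 11             # all non-terminated machines matching the selector (10 desired + 1 surge)
  updatedReplicas: 4       # machines already running the desired template spec
  readyReplicas: 9
  availableReplicas: 8     # ready for at least minReadySeconds
  unavailableReplicas: 3   # still needed for 100% available capacity (11 - 8): not yet available or not yet created
```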
+ description: |- + Total number of unavailable machines targeted by this deployment. + This is the total number of machines that are still required for + the deployment to have 100% available capacity. They may either + be machines that are running but not yet available or machines + that still have not been created. format: int32 type: integer updatedReplicas: - description: Total number of non-terminated machines targeted by this - deployment that have the desired template spec. + description: |- + Total number of non-terminated machines targeted by this deployment + that have the desired template spec. format: int32 type: integer type: object type: object - served: true + served: false storage: false subresources: scale: @@ -541,21 +596,30 @@ spec: jsonPath: .status.unavailableReplicas name: Unavailable type: integer + deprecated: true name: v1alpha4 schema: openAPIV3Schema: - description: "MachineDeployment is the Schema for the machinedeployments API. - \n Deprecated: This type will be removed in one of the next releases." + description: |- + MachineDeployment is the Schema for the machinedeployments API. + + + Deprecated: This type will be removed in one of the next releases. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -568,95 +632,103 @@ spec: minLength: 1 type: string minReadySeconds: - description: Minimum number of seconds for which a newly created machine - should be ready. Defaults to 0 (machine will be considered available - as soon as it is ready) + description: |- + Minimum number of seconds for which a newly created machine should + be ready. + Defaults to 0 (machine will be considered available as soon as it + is ready) format: int32 type: integer paused: description: Indicates that the deployment is paused. type: boolean progressDeadlineSeconds: - description: The maximum time in seconds for a deployment to make - progress before it is considered to be failed. The deployment controller - will continue to process failed deployments and a condition with - a ProgressDeadlineExceeded reason will be surfaced in the deployment - status. Note that progress will not be estimated during the time - a deployment is paused. Defaults to 600s. 
+ description: |- + The maximum time in seconds for a deployment to make progress before it + is considered to be failed. The deployment controller will continue to + process failed deployments and a condition with a ProgressDeadlineExceeded + reason will be surfaced in the deployment status. Note that progress will + not be estimated during the time a deployment is paused. Defaults to 600s. format: int32 type: integer replicas: default: 1 - description: Number of desired machines. Defaults to 1. This is a - pointer to distinguish between explicit zero and not specified. + description: |- + Number of desired machines. Defaults to 1. + This is a pointer to distinguish between explicit zero and not specified. format: int32 type: integer revisionHistoryLimit: - description: The number of old MachineSets to retain to allow rollback. + description: |- + The number of old MachineSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1. format: int32 type: integer selector: - description: Label selector for machines. Existing MachineSets whose - machines are selected by this will be the ones affected by this - deployment. It must match the machine template's labels. + description: |- + Label selector for machines. Existing MachineSets whose machines are + selected by this will be the ones affected by this deployment. + It must match the machine template's labels. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
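The selector semantics spelled out above can be sketched as follows: matchLabels entries and matchExpressions are ANDed, and each matchLabels pair is shorthand for an `In` expression. The label keys and values are illustrative:

```yaml
selector:
  matchLabels:
    app: web               # shorthand for {key: app, operator: In, values: [web]}
  matchExpressions:
    - key: tier
      operator: In         # In, NotIn, Exists, DoesNotExist are the valid operators
      values:
        - frontend
        - edge
```

This selects machines labeled app=web AND tier in {frontend, edge}.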
type: object type: object x-kubernetes-map-type: atomic strategy: - description: The deployment strategy to use to replace existing machines - with new ones. + description: |- + The deployment strategy to use to replace existing machines with + new ones. properties: rollingUpdate: - description: Rolling update config params. Present only if MachineDeploymentStrategyType - = RollingUpdate. + description: |- + Rolling update config params. Present only if + MachineDeploymentStrategyType = RollingUpdate. properties: deletePolicy: - description: DeletePolicy defines the policy used by the MachineDeployment - to identify nodes to delete when downscaling. Valid values - are "Random, "Newest", "Oldest" When no value is supplied, - the default DeletePolicy of MachineSet is used + description: |- + DeletePolicy defines the policy used by the MachineDeployment to identify nodes to delete when downscaling. + Valid values are "Random, "Newest", "Oldest" + When no value is supplied, the default DeletePolicy of MachineSet is used enum: - Random - Newest @@ -666,39 +738,44 @@ spec: anyOf: - type: integer - type: string - description: 'The maximum number of machines that can be scheduled - above the desired number of machines. Value can be an absolute - number (ex: 5) or a percentage of desired machines (ex: - 10%). This can not be 0 if MaxUnavailable is 0. Absolute - number is calculated from percentage by rounding up. Defaults - to 1. Example: when this is set to 30%, the new MachineSet - can be scaled up immediately when the rolling update starts, - such that the total number of old and new machines do not - exceed 130% of desired machines. Once old machines have - been killed, new MachineSet can be scaled up further, ensuring - that total number of machines running at any time during - the update is at most 130% of desired machines.' + description: |- + The maximum number of machines that can be scheduled above the + desired number of machines. + Value can be an absolute number (ex: 5) or a percentage of + desired machines (ex: 10%). + This can not be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up. + Defaults to 1. + Example: when this is set to 30%, the new MachineSet can be scaled + up immediately when the rolling update starts, such that the total + number of old and new machines do not exceed 130% of desired + machines. Once old machines have been killed, new MachineSet can + be scaled up further, ensuring that total number of machines running + at any time during the update is at most 130% of desired machines. x-kubernetes-int-or-string: true maxUnavailable: anyOf: - type: integer - type: string - description: 'The maximum number of machines that can be unavailable - during the update. Value can be an absolute number (ex: - 5) or a percentage of desired machines (ex: 10%). Absolute - number is calculated from percentage by rounding down. This - can not be 0 if MaxSurge is 0. Defaults to 0. Example: when - this is set to 30%, the old MachineSet can be scaled down - to 70% of desired machines immediately when the rolling - update starts. Once new machines are ready, old MachineSet - can be scaled down further, followed by scaling up the new - MachineSet, ensuring that the total number of machines available - at all times during the update is at least 70% of desired - machines.' + description: |- + The maximum number of machines that can be unavailable during the update. 
+ Value can be an absolute number (ex: 5) or a percentage of desired + machines (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 0. + Example: when this is set to 30%, the old MachineSet can be scaled + down to 70% of desired machines immediately when the rolling update + starts. Once new machines are ready, old MachineSet can be scaled + down further, followed by scaling up the new MachineSet, ensuring + that the total number of machines available at all times + during the update is at least 70% of desired machines. x-kubernetes-int-or-string: true type: object type: - description: Type of deployment. Default is RollingUpdate. + description: |- + Type of deployment. + Default is RollingUpdate. enum: - RollingUpdate - OnDelete @@ -708,82 +785,91 @@ spec: description: Template describes the machines that will be created. properties: metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key value map - stored with a resource that may be set by external tools - to store and retrieve arbitrary metadata. They are not queryable - and should be preserved when modifying objects. More info: - http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations type: object labels: additionalProperties: type: string - description: 'Map of string keys and values that can be used - to organize and categorize (scope and select) objects. May - match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object type: object spec: - description: 'Specification of the desired behavior of the machine. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + description: |- + Specification of the desired behavior of the machine. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status properties: bootstrap: - description: Bootstrap is a reference to a local struct which - encapsulates fields to configure the Machine’s bootstrapping - mechanism. + description: |- + Bootstrap is a reference to a local struct which encapsulates + fields to configure the Machine’s bootstrapping mechanism. properties: configRef: - description: ConfigRef is a reference to a bootstrap provider-specific - resource that holds configuration details. The reference - is optional to allow users/operators to specify Bootstrap.DataSecretName - without the need of a controller. + description: |- + ConfigRef is a reference to a bootstrap provider-specific resource + that holds configuration details. 
The reference is optional to + allow users/operators to specify Bootstrap.DataSecretName without + the need of a controller. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object - instead of an entire object, this string should - contain a valid JSON/Go field access statement, - such as desiredState.manifest.containers[2]. For - example, if the object reference is to a container - within a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container - that triggered the event) or if no container name - is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only - to have some well-defined way of referencing a part - of an object. TODO: this design is not final and - this field is subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this - reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic dataSecretName: - description: DataSecretName is the name of the secret - that stores the bootstrap data script. If nil, the Machine - should remain in the Pending state. 
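The two bootstrap variants this field pair allows can be sketched as follows: either a bootstrap provider reconciles a referenced config object, or the user pre-creates the data Secret and no controller is involved. The kind shown is one common bootstrap provider resource, used here illustratively, and all object names are hypothetical:

```yaml
# Variant 1: provider-driven; the referenced object's controller produces the data secret.
bootstrap:
  configRef:
    apiVersion: bootstrap.cluster.x-k8s.io/v1beta1   # illustrative API version
    kind: KubeadmConfigTemplate                      # illustrative provider kind
    name: worker-bootstrap
---
# Variant 2: controller-free; the Machine consumes a user-supplied secret directly.
bootstrap:
  dataSecretName: worker-bootstrap-data
```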
+                            description: |-
+                              DataSecretName is the name of the secret that stores the bootstrap data script.
+                              If nil, the Machine should remain in the Pending state.
                             type: string
                         type: object
                       clusterName:
@@ -792,76 +878,78 @@ spec:
                         minLength: 1
                         type: string
                       failureDomain:
-                        description: FailureDomain is the failure domain the machine
-                          will be created in. Must match a key in the FailureDomains
-                          map stored on the cluster object.
+                        description: |-
+                          FailureDomain is the failure domain the machine will be created in.
+                          Must match a key in the FailureDomains map stored on the cluster object.
                         type: string
                       infrastructureRef:
-                        description: InfrastructureRef is a required reference to
-                          a custom resource offered by an infrastructure provider.
+                        description: |-
+                          InfrastructureRef is a required reference to a custom resource
+                          offered by an infrastructure provider.
                         properties:
                           apiVersion:
                             description: API version of the referent.
                             type: string
                           fieldPath:
-                            description: 'If referring to a piece of an object instead
-                              of an entire object, this string should contain a valid
-                              JSON/Go field access statement, such as desiredState.manifest.containers[2].
-                              For example, if the object reference is to a container
-                              within a pod, this would take on a value like: "spec.containers{name}"
-                              (where "name" refers to the name of the container that
-                              triggered the event) or if no container name is specified
-                              "spec.containers[2]" (container with index 2 in this
-                              pod). This syntax is chosen only to have some well-defined
-                              way of referencing a part of an object. TODO: this design
-                              is not final and this field is subject to change in
-                              the future.'
+                            description: |-
+                              If referring to a piece of an object instead of an entire object, this string
+                              should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+                              For example, if the object reference is to a container within a pod, this would take on a value like:
+                              "spec.containers{name}" (where "name" refers to the name of the container that triggered
+                              the event) or if no container name is specified "spec.containers[2]" (container with
+                              index 2 in this pod). This syntax is chosen only to have some well-defined way of
+                              referencing a part of an object.
+                              TODO: this design is not final and this field is subject to change in the future.
                             type: string
                           kind:
-                            description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+                            description: |-
+                              Kind of the referent.
+                              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
                             type: string
                           name:
-                            description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+                            description: |-
+                              Name of the referent.
+                              More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
                             type: string
                           namespace:
-                            description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
+                            description: |-
+                              Namespace of the referent.
+                              More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
                             type: string
                           resourceVersion:
-                            description: 'Specific resourceVersion to which this reference
-                              is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
+                            description: |-
+                              Specific resourceVersion to which this reference is made, if any.
+                              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
                             type: string
                           uid:
-                            description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
+                            description: |-
+                              UID of the referent.
+                              More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
                             type: string
                         type: object
                         x-kubernetes-map-type: atomic
                       nodeDrainTimeout:
-                        description: 'NodeDrainTimeout is the total amount of time
-                          that the controller will spend on draining a node. The default
-                          value is 0, meaning that the node can be drained without
-                          any time limitations. NOTE: NodeDrainTimeout is different
-                          from `kubectl drain --timeout`'
+                        description: |-
+                          NodeDrainTimeout is the total amount of time that the controller will spend on draining a node.
+                          The default value is 0, meaning that the node can be drained without any time limitations.
+                          NOTE: NodeDrainTimeout is different from `kubectl drain --timeout`
                         type: string
                       providerID:
-                        description: ProviderID is the identification ID of the machine
-                          provided by the provider. This field must match the provider
-                          ID as seen on the node object corresponding to this machine.
-                          This field is required by higher level consumers of cluster-api.
-                          Example use case is cluster autoscaler with cluster-api
-                          as provider. Clean-up logic in the autoscaler compares machines
-                          to nodes to find out machines at provider which could not
-                          get registered as Kubernetes nodes. With cluster-api as
-                          a generic out-of-tree provider for autoscaler, this field
-                          is required by autoscaler to be able to have a provider
-                          view of the list of machines. Another list of nodes is queried
-                          from the k8s apiserver and then a comparison is done to
-                          find out unregistered machines and are marked for delete.
-                          This field will be set by the actuators and consumed by
-                          higher level entities like autoscaler that will be interfacing
-                          with cluster-api as generic provider.
+                        description: |-
+                          ProviderID is the identification ID of the machine provided by the provider.
+                          This field must match the provider ID as seen on the node object corresponding to this machine.
+                          This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler
+                          with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out
+                          machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a
+                          generic out-of-tree provider for autoscaler, this field is required by autoscaler to be
+                          able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver
+                          and then a comparison is done to find out unregistered machines and are marked for delete.
+                          This field will be set by the actuators and consumed by higher level entities like autoscaler that will
+                          be interfacing with cluster-api as generic provider.
                         type: string
                       version:
-                        description: Version defines the desired Kubernetes version.
+                        description: |-
+                          Version defines the desired Kubernetes version.
+                          This field is meant to be optionally used by bootstrap providers.
                         type: string
                     required:
@@ -879,8 +967,9 @@ spec:
             description: MachineDeploymentStatus defines the observed state of MachineDeployment.
             properties:
               availableReplicas:
-                description: Total number of available machines (ready for at least
-                  minReadySeconds) targeted by this deployment.
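As an aside (editor's illustration, not part of the diff), the machine template fields documented above might look like this in a manifest; all values are hypothetical, and providerID formats vary by infrastructure provider:

```yaml
spec:
  version: v1.28.0            # desired Kubernetes version, optionally consumed by bootstrap providers
  failureDomain: us-east-1a   # must match a key in the Cluster's FailureDomains map
  nodeDrainTimeout: 10m       # drain budget enforced by the controller, not `kubectl drain --timeout`
  # Set by the infrastructure provider and matched against Node.spec.providerID,
  # e.g. by the cluster autoscaler when reconciling machines with registered nodes:
  providerID: aws:///us-east-1a/i-0123456789abcdef0
```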
+                description: |-
+                  Total number of available machines (ready for at least minReadySeconds)
+                  targeted by this deployment.
                 format: int32
                 type: integer
               conditions:
@@ -890,37 +979,37 @@ spec:
                     operational state.
                   properties:
                     lastTransitionTime:
-                      description: Last time the condition transitioned from one status
-                        to another. This should be when the underlying condition changed.
-                        If that is not known, then using the time when the API field
-                        changed is acceptable.
+                      description: |-
+                        Last time the condition transitioned from one status to another.
+                        This should be when the underlying condition changed. If that is not known, then using the time when
+                        the API field changed is acceptable.
                       format: date-time
                       type: string
                     message:
-                      description: A human readable message indicating details about
-                        the transition. This field may be empty.
+                      description: |-
+                        A human readable message indicating details about the transition.
+                        This field may be empty.
                       type: string
                     reason:
-                      description: The reason for the condition's last transition
-                        in CamelCase. The specific API may choose whether or not this
-                        field is considered a guaranteed API. This field may not be
-                        empty.
+                      description: |-
+                        The reason for the condition's last transition in CamelCase.
+                        The specific API may choose whether or not this field is considered a guaranteed API.
+                        This field may not be empty.
                       type: string
                     severity:
-                      description: Severity provides an explicit classification of
-                        Reason code, so the users or machines can immediately understand
-                        the current situation and act accordingly. The Severity field
-                        MUST be set only when Status=False.
+                      description: |-
+                        Severity provides an explicit classification of Reason code, so the users or machines can immediately
+                        understand the current situation and act accordingly.
+                        The Severity field MUST be set only when Status=False.
                       type: string
                     status:
                       description: Status of the condition, one of True, False, Unknown.
                       type: string
                     type:
-                      description: Type of condition in CamelCase or in foo.example.com/CamelCase.
-                        Many .condition.type values are consistent across resources
-                        like Available, but because arbitrary conditions can be useful
-                        (see .node.status.conditions), the ability to deconflict is
-                        important.
+                      description: |-
+                        Type of condition in CamelCase or in foo.example.com/CamelCase.
+                        Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+                        can be useful (see .node.status.conditions), the ability to deconflict is important.
                       type: string
                   required:
                   - status
@@ -940,32 +1029,35 @@ spec:
                 format: int32
                 type: integer
               replicas:
-                description: Total number of non-terminated machines targeted by this
-                  deployment (their labels match the selector).
+                description: |-
+                  Total number of non-terminated machines targeted by this deployment
+                  (their labels match the selector).
                 format: int32
                 type: integer
               selector:
-                description: 'Selector is the same as the label selector but in the
-                  string format to avoid introspection by clients. The string will
-                  be in the same format as the query-param syntax. More info about
-                  label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors'
+                description: |-
+                  Selector is the same as the label selector but in the string format to avoid introspection
+                  by clients. The string will be in the same format as the query-param syntax.
+                  More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors
                 type: string
               unavailableReplicas:
-                description: Total number of unavailable machines targeted by this
-                  deployment. This is the total number of machines that are still
-                  required for the deployment to have 100% available capacity. They
-                  may either be machines that are running but not yet available or
-                  machines that still have not been created.
+                description: |-
+                  Total number of unavailable machines targeted by this deployment.
+                  This is the total number of machines that are still required for
+                  the deployment to have 100% available capacity. They may either
+                  be machines that are running but not yet available or machines
+                  that still have not been created.
                 format: int32
                 type: integer
               updatedReplicas:
-                description: Total number of non-terminated machines targeted by this
-                  deployment that have the desired template spec.
+                description: |-
+                  Total number of non-terminated machines targeted by this deployment
+                  that have the desired template spec.
                 format: int32
                 type: integer
             type: object
         type: object
-    served: true
+    served: false
     storage: false
     subresources:
       scale:
@@ -1018,14 +1110,19 @@ spec:
         description: MachineDeployment is the Schema for the machinedeployments API.
         properties:
           apiVersion:
-            description: 'APIVersion defines the versioned schema of this representation
-              of an object. Servers should convert recognized schemas to the latest
-              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
             type: string
           kind:
-            description: 'Kind is a string value representing the REST resource this
-              object represents. Servers may infer this from the endpoint the client
-              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
             type: string
           metadata:
             type: object
@@ -1038,108 +1135,126 @@ spec:
                 minLength: 1
                 type: string
               minReadySeconds:
-                description: Minimum number of seconds for which a newly created machine
-                  should be ready. Defaults to 0 (machine will be considered available
-                  as soon as it is ready)
+                description: |-
+                  MinReadySeconds is the minimum number of seconds for which a Node for a newly created machine should be ready before considering the replica available.
+                  Defaults to 0 (machine will be considered available as soon as the Node is ready)
                 format: int32
                 type: integer
               paused:
                 description: Indicates that the deployment is paused.
                 type: boolean
               progressDeadlineSeconds:
-                description: The maximum time in seconds for a deployment to make
-                  progress before it is considered to be failed. The deployment controller
-                  will continue to process failed deployments and a condition with
-                  a ProgressDeadlineExceeded reason will be surfaced in the deployment
-                  status. Note that progress will not be estimated during the time
-                  a deployment is paused. Defaults to 600s.
+                description: |-
+                  The maximum time in seconds for a deployment to make progress before it
+                  is considered to be failed. The deployment controller will continue to
+                  process failed deployments and a condition with a ProgressDeadlineExceeded
+                  reason will be surfaced in the deployment status. Note that progress will
+                  not be estimated during the time a deployment is paused. Defaults to 600s.
                 format: int32
                 type: integer
               replicas:
-                description: "Number of desired machines. This is a pointer to distinguish
-                  between explicit zero and not specified. \n Defaults to: * if the
-                  Kubernetes autoscaler min size and max size annotations are set:
-                  - if it's a new MachineDeployment, use min size - if the replicas
-                  field of the old MachineDeployment is < min size, use min size
-                  - if the replicas field of the old MachineDeployment is > max size,
-                  use max size - if the replicas field of the old MachineDeployment
-                  is in the (min size, max size) range, keep the value from the oldMD
-                  * otherwise use 1 Note: Defaulting will be run whenever the replicas
-                  field is not set: * A new MachineDeployment is created with replicas
-                  not set. * On an existing MachineDeployment the replicas field was
-                  first set and is now unset. Those cases are especially relevant
-                  for the following Kubernetes autoscaler use cases: * A new MachineDeployment
-                  is created and replicas should be managed by the autoscaler * An
-                  existing MachineDeployment which initially wasn't controlled by
-                  the autoscaler should be later controlled by the autoscaler"
+                description: |-
+                  Number of desired machines.
+                  This is a pointer to distinguish between explicit zero and not specified.
+
+
+                  Defaults to:
+                  * if the Kubernetes autoscaler min size and max size annotations are set:
+                    - if it's a new MachineDeployment, use min size
+                    - if the replicas field of the old MachineDeployment is < min size, use min size
+                    - if the replicas field of the old MachineDeployment is > max size, use max size
+                    - if the replicas field of the old MachineDeployment is in the (min size, max size) range, keep the value from the oldMD
+                  * otherwise use 1
+                  Note: Defaulting will be run whenever the replicas field is not set:
+                  * A new MachineDeployment is created with replicas not set.
+                  * On an existing MachineDeployment the replicas field was first set and is now unset.
+                  Those cases are especially relevant for the following Kubernetes autoscaler use cases:
+                  * A new MachineDeployment is created and replicas should be managed by the autoscaler
+                  * An existing MachineDeployment which initially wasn't controlled by the autoscaler
+                    should be later controlled by the autoscaler
                 format: int32
                 type: integer
               revisionHistoryLimit:
-                description: The number of old MachineSets to retain to allow rollback.
+                description: |-
+                  The number of old MachineSets to retain to allow rollback.
+                  This is a pointer to distinguish between explicit zero and not specified.
+                  Defaults to 1.
                 format: int32
                 type: integer
+              rolloutAfter:
+                description: |-
+                  RolloutAfter is a field to indicate a rollout should be performed
+                  after the specified time even if no changes have been made to the
+                  MachineDeployment.
+                  Example: In the YAML the time can be specified in the RFC3339 format.
+                  To specify the rolloutAfter target as March 9, 2023, at 9 am UTC
+                  use "2023-03-09T09:00:00Z".
+                format: date-time
+                type: string
               selector:
-                description: Label selector for machines. Existing MachineSets whose
-                  machines are selected by this will be the ones affected by this
-                  deployment. It must match the machine template's labels.
+                description: |-
+                  Label selector for machines. Existing MachineSets whose machines are
+                  selected by this will be the ones affected by this deployment.
+                  It must match the machine template's labels.
                 properties:
                   matchExpressions:
                     description: matchExpressions is a list of label selector requirements.
                       The requirements are ANDed.
                     items:
-                      description: A label selector requirement is a selector that
-                        contains values, a key, and an operator that relates the key
-                        and values.
+                      description: |-
+                        A label selector requirement is a selector that contains values, a key, and an operator that
+                        relates the key and values.
                       properties:
                         key:
                           description: key is the label key that the selector applies
                             to.
                           type: string
                         operator:
-                          description: operator represents a key's relationship to
-                            a set of values. Valid operators are In, NotIn, Exists
-                            and DoesNotExist.
+                          description: |-
+                            operator represents a key's relationship to a set of values.
+                            Valid operators are In, NotIn, Exists and DoesNotExist.
                           type: string
                         values:
-                          description: values is an array of string values. If the
-                            operator is In or NotIn, the values array must be non-empty.
-                            If the operator is Exists or DoesNotExist, the values
-                            array must be empty. This array is replaced during a strategic
+                          description: |-
+                            values is an array of string values. If the operator is In or NotIn,
+                            the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                            the values array must be empty. This array is replaced during a strategic
                             merge patch.
                           items:
                             type: string
                           type: array
+                          x-kubernetes-list-type: atomic
                       required:
                       - key
                       - operator
                       type: object
                     type: array
+                    x-kubernetes-list-type: atomic
                   matchLabels:
                     additionalProperties:
                       type: string
-                    description: matchLabels is a map of {key,value} pairs. A single
-                      {key,value} in the matchLabels map is equivalent to an element
-                      of matchExpressions, whose key field is "key", the operator
-                      is "In", and the values array contains only "value". The requirements
-                      are ANDed.
+                    description: |-
+                      matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                      map is equivalent to an element of matchExpressions, whose key field is "key", the
+                      operator is "In", and the values array contains only "value". The requirements are ANDed.
                     type: object
                 type: object
                 x-kubernetes-map-type: atomic
               strategy:
-                description: The deployment strategy to use to replace existing machines
-                  with new ones.
+                description: |-
+                  The deployment strategy to use to replace existing machines with
+                  new ones.
                 properties:
                   rollingUpdate:
-                    description: Rolling update config params. Present only if MachineDeploymentStrategyType
-                      = RollingUpdate.
+                    description: |-
+                      Rolling update config params. Present only if
+                      MachineDeploymentStrategyType = RollingUpdate.
                     properties:
                       deletePolicy:
-                        description: DeletePolicy defines the policy used by the MachineDeployment
-                          to identify nodes to delete when downscaling. Valid values
-                          are "Random, "Newest", "Oldest" When no value is supplied,
-                          the default DeletePolicy of MachineSet is used
+                        description: |-
+                          DeletePolicy defines the policy used by the MachineDeployment to identify nodes to delete when downscaling.
+                          Valid values are "Random", "Newest", "Oldest"
+                          When no value is supplied, the default DeletePolicy of MachineSet is used
                         enum:
                         - Random
                         - Newest
@@ -1149,39 +1264,44 @@ spec:
                         anyOf:
                         - type: integer
                         - type: string
-                        description: 'The maximum number of machines that can be scheduled
-                          above the desired number of machines. Value can be an absolute
-                          number (ex: 5) or a percentage of desired machines (ex:
-                          10%). This can not be 0 if MaxUnavailable is 0. Absolute
-                          number is calculated from percentage by rounding up. Defaults
-                          to 1. Example: when this is set to 30%, the new MachineSet
-                          can be scaled up immediately when the rolling update starts,
-                          such that the total number of old and new machines do not
-                          exceed 130% of desired machines. Once old machines have
-                          been killed, new MachineSet can be scaled up further, ensuring
-                          that total number of machines running at any time during
-                          the update is at most 130% of desired machines.'
+                        description: |-
+                          The maximum number of machines that can be scheduled above the
+                          desired number of machines.
+                          Value can be an absolute number (ex: 5) or a percentage of
+                          desired machines (ex: 10%).
+                          This can not be 0 if MaxUnavailable is 0.
+                          Absolute number is calculated from percentage by rounding up.
+                          Defaults to 1.
+                          Example: when this is set to 30%, the new MachineSet can be scaled
+                          up immediately when the rolling update starts, such that the total
+                          number of old and new machines do not exceed 130% of desired
+                          machines. Once old machines have been killed, new MachineSet can
+                          be scaled up further, ensuring that total number of machines running
+                          at any time during the update is at most 130% of desired machines.
                         x-kubernetes-int-or-string: true
                       maxUnavailable:
                         anyOf:
                         - type: integer
                         - type: string
-                        description: 'The maximum number of machines that can be unavailable
-                          during the update. Value can be an absolute number (ex:
-                          5) or a percentage of desired machines (ex: 10%). Absolute
-                          number is calculated from percentage by rounding down. This
-                          can not be 0 if MaxSurge is 0. Defaults to 0. Example: when
-                          this is set to 30%, the old MachineSet can be scaled down
-                          to 70% of desired machines immediately when the rolling
-                          update starts. Once new machines are ready, old MachineSet
-                          can be scaled down further, followed by scaling up the new
-                          MachineSet, ensuring that the total number of machines available
-                          at all times during the update is at least 70% of desired
-                          machines.'
+                        description: |-
+                          The maximum number of machines that can be unavailable during the update.
+                          Value can be an absolute number (ex: 5) or a percentage of desired
+                          machines (ex: 10%).
+                          Absolute number is calculated from percentage by rounding down.
+                          This can not be 0 if MaxSurge is 0.
+                          Defaults to 0.
+                          Example: when this is set to 30%, the old MachineSet can be scaled
+                          down to 70% of desired machines immediately when the rolling update
+                          starts. Once new machines are ready, old MachineSet can be scaled
+                          down further, followed by scaling up the new
+                          MachineSet, ensuring that the total number of machines available
+                          at all times
+                          during the update is at least 70% of desired machines.
                         x-kubernetes-int-or-string: true
                     type: object
                   type:
-                    description: Type of deployment. Default is RollingUpdate.
+                    description: |-
+                      Type of deployment. Allowed values are RollingUpdate and OnDelete.
+                      The default is RollingUpdate.
                     enum:
                     - RollingUpdate
                     - OnDelete
@@ -1191,82 +1311,91 @@ spec:
                 description: Template describes the machines that will be created.
                 properties:
                   metadata:
-                    description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata'
+                    description: |-
+                      Standard object's metadata.
+                      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
                     properties:
                       annotations:
                         additionalProperties:
                           type: string
-                        description: 'Annotations is an unstructured key value map
-                          stored with a resource that may be set by external tools
-                          to store and retrieve arbitrary metadata. They are not queryable
-                          and should be preserved when modifying objects. More info:
-                          http://kubernetes.io/docs/user-guide/annotations'
+                        description: |-
+                          Annotations is an unstructured key value map stored with a resource that may be
+                          set by external tools to store and retrieve arbitrary metadata. They are not
+                          queryable and should be preserved when modifying objects.
+                          More info: http://kubernetes.io/docs/user-guide/annotations
                         type: object
                       labels:
                         additionalProperties:
                           type: string
-                        description: 'Map of string keys and values that can be used
-                          to organize and categorize (scope and select) objects. May
-                          match selectors of replication controllers and services.
-                          More info: http://kubernetes.io/docs/user-guide/labels'
+                        description: |-
+                          Map of string keys and values that can be used to organize and categorize
+                          (scope and select) objects. May match selectors of replication controllers
+                          and services.
+                          More info: http://kubernetes.io/docs/user-guide/labels
                         type: object
                     type: object
                   spec:
-                    description: 'Specification of the desired behavior of the machine.
-                      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status'
+                    description: |-
+                      Specification of the desired behavior of the machine.
+                      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
                     properties:
                       bootstrap:
-                        description: Bootstrap is a reference to a local struct which
-                          encapsulates fields to configure the Machine’s bootstrapping
-                          mechanism.
+                        description: |-
+                          Bootstrap is a reference to a local struct which encapsulates
+                          fields to configure the Machine’s bootstrapping mechanism.
                         properties:
                           configRef:
-                            description: ConfigRef is a reference to a bootstrap provider-specific
-                              resource that holds configuration details. The reference
-                              is optional to allow users/operators to specify Bootstrap.DataSecretName
-                              without the need of a controller.
+                            description: |-
+                              ConfigRef is a reference to a bootstrap provider-specific resource
+                              that holds configuration details. The reference is optional to
+                              allow users/operators to specify Bootstrap.DataSecretName without
+                              the need of a controller.
                             properties:
                               apiVersion:
                                 description: API version of the referent.
                                 type: string
                               fieldPath:
-                                description: 'If referring to a piece of an object
-                                  instead of an entire object, this string should
-                                  contain a valid JSON/Go field access statement,
-                                  such as desiredState.manifest.containers[2]. For
-                                  example, if the object reference is to a container
-                                  within a pod, this would take on a value like: "spec.containers{name}"
-                                  (where "name" refers to the name of the container
-                                  that triggered the event) or if no container name
-                                  is specified "spec.containers[2]" (container with
-                                  index 2 in this pod). This syntax is chosen only
-                                  to have some well-defined way of referencing a part
-                                  of an object. TODO: this design is not final and
-                                  this field is subject to change in the future.'
+                                description: |-
+                                  If referring to a piece of an object instead of an entire object, this string
+                                  should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+                                  For example, if the object reference is to a container within a pod, this would take on a value like:
+                                  "spec.containers{name}" (where "name" refers to the name of the container that triggered
+                                  the event) or if no container name is specified "spec.containers[2]" (container with
+                                  index 2 in this pod). This syntax is chosen only to have some well-defined way of
+                                  referencing a part of an object.
+                                  TODO: this design is not final and this field is subject to change in the future.
                                 type: string
                               kind:
-                                description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+                                description: |-
+                                  Kind of the referent.
+                                  More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
                                 type: string
                               name:
-                                description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+                                description: |-
+                                  Name of the referent.
+                                  More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
                                 type: string
                               namespace:
                                 description: 'Namespace of the referent. More info:
-                                  https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
+                                description: |-
+                                  Namespace of the referent.
+                                  More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
                                 type: string
                               resourceVersion:
-                                description: 'Specific resourceVersion to which this
-                                  reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
+                                description: |-
+                                  Specific resourceVersion to which this reference is made, if any.
+                                  More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
                                 type: string
                               uid:
-                                description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
+                                description: |-
+                                  UID of the referent.
+                                  More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
                                 type: string
                             type: object
                             x-kubernetes-map-type: atomic
                           dataSecretName:
-                            description: DataSecretName is the name of the secret
-                              that stores the bootstrap data script. If nil, the Machine
-                              should remain in the Pending state.
+                            description: |-
+                              DataSecretName is the name of the secret that stores the bootstrap data script.
+                              If nil, the Machine should remain in the Pending state.
                             type: string
                         type: object
                       clusterName:
@@ -1275,88 +1404,89 @@ spec:
                         minLength: 1
                         type: string
                       failureDomain:
-                        description: FailureDomain is the failure domain the machine
-                          will be created in. Must match a key in the FailureDomains
-                          map stored on the cluster object.
+                        description: |-
+                          FailureDomain is the failure domain the machine will be created in.
+                          Must match a key in the FailureDomains map stored on the cluster object.
                         type: string
                       infrastructureRef:
-                        description: InfrastructureRef is a required reference to
-                          a custom resource offered by an infrastructure provider.
+                        description: |-
+                          InfrastructureRef is a required reference to a custom resource
+                          offered by an infrastructure provider.
                         properties:
                           apiVersion:
                             description: API version of the referent.
                            type: string
                           fieldPath:
-                            description: 'If referring to a piece of an object instead
-                              of an entire object, this string should contain a valid
-                              JSON/Go field access statement, such as desiredState.manifest.containers[2].
-                              For example, if the object reference is to a container
-                              within a pod, this would take on a value like: "spec.containers{name}"
-                              (where "name" refers to the name of the container that
-                              triggered the event) or if no container name is specified
-                              "spec.containers[2]" (container with index 2 in this
-                              pod). This syntax is chosen only to have some well-defined
-                              way of referencing a part of an object. TODO: this design
-                              is not final and this field is subject to change in
-                              the future.'
+                            description: |-
+                              If referring to a piece of an object instead of an entire object, this string
+                              should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+                              For example, if the object reference is to a container within a pod, this would take on a value like:
+                              "spec.containers{name}" (where "name" refers to the name of the container that triggered
+                              the event) or if no container name is specified "spec.containers[2]" (container with
+                              index 2 in this pod). This syntax is chosen only to have some well-defined way of
+                              referencing a part of an object.
+                              TODO: this design is not final and this field is subject to change in the future.
                             type: string
                           kind:
-                            description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+                            description: |-
+                              Kind of the referent.
+                              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
                             type: string
                           name:
-                            description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+                            description: |-
+                              Name of the referent.
+                              More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
                             type: string
                           namespace:
-                            description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
+                            description: |-
+                              Namespace of the referent.
+                              More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
                             type: string
                           resourceVersion:
-                            description: 'Specific resourceVersion to which this reference
-                              is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
+                            description: |-
+                              Specific resourceVersion to which this reference is made, if any.
+                              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
                             type: string
                           uid:
-                            description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
+                            description: |-
+                              UID of the referent.
+                              More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
                             type: string
                         type: object
                         x-kubernetes-map-type: atomic
                       nodeDeletionTimeout:
-                        description: NodeDeletionTimeout defines how long the controller
-                          will attempt to delete the Node that the Machine hosts after
-                          the Machine is marked for deletion. A duration of 0 will
-                          retry deletion indefinitely. Defaults to 10 seconds.
+                        description: |-
+                          NodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine
+                          hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely.
+                          Defaults to 10 seconds.
                         type: string
                       nodeDrainTimeout:
-                        description: 'NodeDrainTimeout is the total amount of time
-                          that the controller will spend on draining a node. The default
-                          value is 0, meaning that the node can be drained without
-                          any time limitations. NOTE: NodeDrainTimeout is different
-                          from `kubectl drain --timeout`'
+                        description: |-
+                          NodeDrainTimeout is the total amount of time that the controller will spend on draining a node.
+                          The default value is 0, meaning that the node can be drained without any time limitations.
+                          NOTE: NodeDrainTimeout is different from `kubectl drain --timeout`
                         type: string
                       nodeVolumeDetachTimeout:
-                        description: NodeVolumeDetachTimeout is the total amount of
-                          time that the controller will spend on waiting for all volumes
-                          to be detached. The default value is 0, meaning that the
-                          volumes can be detached without any time limitations.
+                        description: |-
+                          NodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes
+                          to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations.
                         type: string
                       providerID:
-                        description: ProviderID is the identification ID of the machine
-                          provided by the provider. This field must match the provider
-                          ID as seen on the node object corresponding to this machine.
-                          This field is required by higher level consumers of cluster-api.
-                          Example use case is cluster autoscaler with cluster-api
-                          as provider. Clean-up logic in the autoscaler compares machines
-                          to nodes to find out machines at provider which could not
-                          get registered as Kubernetes nodes. With cluster-api as
-                          a generic out-of-tree provider for autoscaler, this field
-                          is required by autoscaler to be able to have a provider
-                          view of the list of machines. Another list of nodes is queried
-                          from the k8s apiserver and then a comparison is done to
-                          find out unregistered machines and are marked for delete.
-                          This field will be set by the actuators and consumed by
-                          higher level entities like autoscaler that will be interfacing
-                          with cluster-api as generic provider.
+                        description: |-
+                          ProviderID is the identification ID of the machine provided by the provider.
+                          This field must match the provider ID as seen on the node object corresponding to this machine.
+                          This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler
+                          with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out
+                          machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a
+                          generic out-of-tree provider for autoscaler, this field is required by autoscaler to be
+                          able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver
+                          and then a comparison is done to find out unregistered machines and are marked for delete.
+                          This field will be set by the actuators and consumed by higher level entities like autoscaler that will
+                          be interfacing with cluster-api as generic provider.
                         type: string
                       version:
-                        description: Version defines the desired Kubernetes version.
+                        description: |-
+                          Version defines the desired Kubernetes version.
+                          This field is meant to be optionally used by bootstrap providers.
                         type: string
                     required:
@@ -1374,8 +1504,9 @@ spec:
             description: MachineDeploymentStatus defines the observed state of MachineDeployment.
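For orientation (editor's sketch, not part of the diff), a status shaped by the schema below might read as follows; the reason and message values are hypothetical:

```yaml
status:
  replicas: 3
  updatedReplicas: 2
  availableReplicas: 2      # ready for at least minReadySeconds
  unavailableReplicas: 1    # still needed to reach 100% available capacity
  conditions:
  - type: Available
    status: "False"
    severity: Warning                 # set only when status is False, per the schema
    reason: WaitingForAvailability    # hypothetical CamelCase reason
    lastTransitionTime: "2023-03-09T09:05:00Z"
    message: 2 of 3 desired machines are available
```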
            properties:
               availableReplicas:
-                description: Total number of available machines (ready for at least
-                  minReadySeconds) targeted by this deployment.
+                description: |-
+                  Total number of available machines (ready for at least minReadySeconds)
+                  targeted by this deployment.
                 format: int32
                 type: integer
               conditions:
@@ -1385,37 +1516,37 @@ spec:
                     operational state.
                   properties:
                     lastTransitionTime:
-                      description: Last time the condition transitioned from one status
-                        to another. This should be when the underlying condition changed.
-                        If that is not known, then using the time when the API field
-                        changed is acceptable.
+                      description: |-
+                        Last time the condition transitioned from one status to another.
+                        This should be when the underlying condition changed. If that is not known, then using the time when
+                        the API field changed is acceptable.
                       format: date-time
                       type: string
                     message:
-                      description: A human readable message indicating details about
-                        the transition. This field may be empty.
+                      description: |-
+                        A human readable message indicating details about the transition.
+                        This field may be empty.
                       type: string
                     reason:
-                      description: The reason for the condition's last transition
-                        in CamelCase. The specific API may choose whether or not this
-                        field is considered a guaranteed API. This field may not be
-                        empty.
+                      description: |-
+                        The reason for the condition's last transition in CamelCase.
+                        The specific API may choose whether or not this field is considered a guaranteed API.
+                        This field may not be empty.
                       type: string
                     severity:
-                      description: Severity provides an explicit classification of
-                        Reason code, so the users or machines can immediately understand
-                        the current situation and act accordingly. The Severity field
-                        MUST be set only when Status=False.
+                      description: |-
+                        Severity provides an explicit classification of Reason code, so the users or machines can immediately
+                        understand the current situation and act accordingly.
+                        The Severity field MUST be set only when Status=False.
                       type: string
                     status:
                       description: Status of the condition, one of True, False, Unknown.
                       type: string
                     type:
-                      description: Type of condition in CamelCase or in foo.example.com/CamelCase.
-                        Many .condition.type values are consistent across resources
-                        like Available, but because arbitrary conditions can be useful
-                        (see .node.status.conditions), the ability to deconflict is
-                        important.
+                      description: |-
+                        Type of condition in CamelCase or in foo.example.com/CamelCase.
+                        Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+                        can be useful (see .node.status.conditions), the ability to deconflict is important.
                       type: string
                   required:
                   - lastTransitionTime
@@ -1436,27 +1567,30 @@ spec:
                 format: int32
                 type: integer
               replicas:
-                description: Total number of non-terminated machines targeted by this
-                  deployment (their labels match the selector).
+                description: |-
+                  Total number of non-terminated machines targeted by this deployment
+                  (their labels match the selector).
                 format: int32
                 type: integer
               selector:
-                description: 'Selector is the same as the label selector but in the
-                  string format to avoid introspection by clients. The string will
-                  be in the same format as the query-param syntax. More info about
-                  label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors'
+                description: |-
+                  Selector is the same as the label selector but in the string format to avoid introspection
+                  by clients. The string will be in the same format as the query-param syntax.
+                  More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors
                 type: string
               unavailableReplicas:
-                description: Total number of unavailable machines targeted by this
-                  deployment. This is the total number of machines that are still
-                  required for the deployment to have 100% available capacity. They
-                  may either be machines that are running but not yet available or
-                  machines that still have not been created.
+                description: |-
+                  Total number of unavailable machines targeted by this deployment.
+                  This is the total number of machines that are still required for
+                  the deployment to have 100% available capacity. They may either
+                  be machines that are running but not yet available or machines
+                  that still have not been created.
                 format: int32
                 type: integer
               updatedReplicas:
-                description: Total number of non-terminated machines targeted by this
-                  deployment that have the desired template spec.
+                description: |-
+                  Total number of non-terminated machines targeted by this deployment
+                  that have the desired template spec.
                 format: int32
                 type: integer
             type: object
         type: object
diff --git a/config/crd/bases/cluster.x-k8s.io_machinehealthchecks.yaml b/config/crd/bases/cluster.x-k8s.io_machinehealthchecks.yaml
index 6405395a3bcb..8467a61a146d 100644
--- a/config/crd/bases/cluster.x-k8s.io_machinehealthchecks.yaml
+++ b/config/crd/bases/cluster.x-k8s.io_machinehealthchecks.yaml
@@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.11.3
-    creationTimestamp: null
+    controller-gen.kubebuilder.io/version: v0.15.0
   name: machinehealthchecks.cluster.x-k8s.io
 spec:
   group: cluster.x-k8s.io
@@ -33,21 +32,30 @@ spec:
       jsonPath: .status.currentHealthy
       name: CurrentHealthy
       type: integer
+    deprecated: true
     name: v1alpha3
     schema:
       openAPIV3Schema:
-        description: "MachineHealthCheck is the Schema for the machinehealthchecks
-          API. \n Deprecated: This type will be removed in one of the next releases."
+        description: |-
+          MachineHealthCheck is the Schema for the machinehealthchecks API.
+
+
+          Deprecated: This type will be removed in one of the next releases.
         properties:
           apiVersion:
-            description: 'APIVersion defines the versioned schema of this representation
-              of an object. Servers should convert recognized schemas to the latest
-              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
             type: string
           kind:
-            description: 'Kind is a string value representing the REST resource this
-              object represents. Servers may infer this from the endpoint the client
-              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
             type: string
           metadata:
             type: object
@@ -63,52 +71,63 @@ spec:
                 anyOf:
                 - type: integer
                 - type: string
-                description: Any further remediation is only allowed if at most "MaxUnhealthy"
-                  machines selected by "selector" are not healthy.
+                description: |-
+                  Any further remediation is only allowed if at most "MaxUnhealthy" machines selected by
+                  "selector" are not healthy.
                 x-kubernetes-int-or-string: true
               nodeStartupTimeout:
-                description: Machines older than this duration without a node will
-                  be considered to have failed and will be remediated.
+                description: |-
+                  Machines older than this duration without a node will be considered to have
+                  failed and will be remediated.
                 type: string
               remediationTemplate:
-                description: "RemediationTemplate is a reference to a remediation
-                  template provided by an infrastructure provider. \n This field is
-                  completely optional, when filled, the MachineHealthCheck controller
-                  creates a new object from the template referenced and hands off
-                  remediation of the machine to a controller that lives outside of
-                  Cluster API."
+                description: |-
+                  RemediationTemplate is a reference to a remediation template
+                  provided by an infrastructure provider.
+
+
+                  This field is completely optional, when filled, the MachineHealthCheck controller
+                  creates a new object from the template referenced and hands off remediation of the machine to
+                  a controller that lives outside of Cluster API.
                 properties:
                   apiVersion:
                     description: API version of the referent.
                     type: string
                   fieldPath:
-                    description: 'If referring to a piece of an object instead of
-                      an entire object, this string should contain a valid JSON/Go
-                      field access statement, such as desiredState.manifest.containers[2].
-                      For example, if the object reference is to a container within
-                      a pod, this would take on a value like: "spec.containers{name}"
-                      (where "name" refers to the name of the container that triggered
-                      the event) or if no container name is specified "spec.containers[2]"
-                      (container with index 2 in this pod). This syntax is chosen
-                      only to have some well-defined way of referencing a part of
-                      an object. TODO: this design is not final and this field is
-                      subject to change in the future.'
+                    description: |-
+                      If referring to a piece of an object instead of an entire object, this string
+                      should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+                      For example, if the object reference is to a container within a pod, this would take on a value like:
+                      "spec.containers{name}" (where "name" refers to the name of the container that triggered
+                      the event) or if no container name is specified "spec.containers[2]" (container with
+                      index 2 in this pod). This syntax is chosen only to have some well-defined way of
+                      referencing a part of an object.
+                      TODO: this design is not final and this field is subject to change in the future.
                     type: string
                   kind:
-                    description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+                    description: |-
+                      Kind of the referent.
+                      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
                     type: string
                   name:
-                    description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+                    description: |-
+                      Name of the referent.
+                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
                     type: string
                   namespace:
-                    description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
+                    description: |-
+                      Namespace of the referent.
+                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
                     type: string
                   resourceVersion:
-                    description: 'Specific resourceVersion to which this reference
-                      is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
+                    description: |-
+                      Specific resourceVersion to which this reference is made, if any.
+                      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
                     type: string
                   uid:
-                    description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
+                    description: |-
+                      UID of the referent.
+                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
                     type: string
                 type: object
                 x-kubernetes-map-type: atomic
@@ -120,54 +139,55 @@ spec:
                     description: matchExpressions is a list of label selector requirements.
                       The requirements are ANDed.
                     items:
-                      description: A label selector requirement is a selector that
-                        contains values, a key, and an operator that relates the key
-                        and values.
+                      description: |-
+                        A label selector requirement is a selector that contains values, a key, and an operator that
+                        relates the key and values.
                       properties:
                         key:
                           description: key is the label key that the selector applies
                             to.
                           type: string
                         operator:
-                          description: operator represents a key's relationship to
-                            a set of values. Valid operators are In, NotIn, Exists
-                            and DoesNotExist.
+                          description: |-
+                            operator represents a key's relationship to a set of values.
+                            Valid operators are In, NotIn, Exists and DoesNotExist.
                           type: string
                         values:
-                          description: values is an array of string values. If the
-                            operator is In or NotIn, the values array must be non-empty.
-                            If the operator is Exists or DoesNotExist, the values
-                            array must be empty. This array is replaced during a strategic
+                          description: |-
+                            values is an array of string values. If the operator is In or NotIn,
+                            the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                            the values array must be empty. This array is replaced during a strategic
                             merge patch.
                           items:
                             type: string
                           type: array
+                          x-kubernetes-list-type: atomic
                       required:
                       - key
                       - operator
                       type: object
                     type: array
+                    x-kubernetes-list-type: atomic
                   matchLabels:
                     additionalProperties:
                       type: string
-                    description: matchLabels is a map of {key,value} pairs. A single
-                      {key,value} in the matchLabels map is equivalent to an element
-                      of matchExpressions, whose key field is "key", the operator
-                      is "In", and the values array contains only "value". The requirements
-                      are ANDed.
+                    description: |-
+                      matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                      map is equivalent to an element of matchExpressions, whose key field is "key", the
+                      operator is "In", and the values array contains only "value". The requirements are ANDed.
                     type: object
                 type: object
                 x-kubernetes-map-type: atomic
               unhealthyConditions:
-                description: UnhealthyConditions contains a list of the conditions
-                  that determine whether a node is considered unhealthy. The conditions
-                  are combined in a logical OR, i.e. if any of the conditions is met,
-                  the node is unhealthy.
+                description: |-
+                  UnhealthyConditions contains a list of the conditions that determine
+                  whether a node is considered unhealthy. The conditions are combined in a
+                  logical OR, i.e. if any of the conditions is met, the node is unhealthy.
                 items:
-                  description: UnhealthyCondition represents a Node condition type
-                    and value with a timeout specified as a duration. When the named
-                    condition has been in the given status for at least the timeout
-                    value, a node is considered unhealthy.
+                  description: |-
+                    UnhealthyCondition represents a Node condition type and value with a timeout
+                    specified as a duration. When the named condition has been in the given
+                    status for at least the timeout value, a node is considered unhealthy.
                   properties:
                     status:
                       minLength: 1
@@ -199,37 +219,37 @@ spec:
                     operational state.
                   properties:
                     lastTransitionTime:
-                      description: Last time the condition transitioned from one status
-                        to another. This should be when the underlying condition changed.
-                        If that is not known, then using the time when the API field
-                        changed is acceptable.
+                      description: |-
+                        Last time the condition transitioned from one status to another.
+                        This should be when the underlying condition changed. If that is not known, then using the time when
+                        the API field changed is acceptable.
                       format: date-time
                       type: string
                     message:
-                      description: A human readable message indicating details about
-                        the transition. This field may be empty.
+                      description: |-
+                        A human readable message indicating details about the transition.
+                        This field may be empty.
                       type: string
                     reason:
-                      description: The reason for the condition's last transition
-                        in CamelCase. The specific API may choose whether or not this
-                        field is considered a guaranteed API. This field may not be
-                        empty.
+                      description: |-
+                        The reason for the condition's last transition in CamelCase.
+                        The specific API may choose whether or not this field is considered a guaranteed API.
+                        This field may not be empty.
                       type: string
                     severity:
-                      description: Severity provides an explicit classification of
-                        Reason code, so the users or machines can immediately understand
-                        the current situation and act accordingly. The Severity field
-                        MUST be set only when Status=False.
+                      description: |-
+                        Severity provides an explicit classification of Reason code, so the users or machines can immediately
+                        understand the current situation and act accordingly.
+                        The Severity field MUST be set only when Status=False.
                       type: string
                     status:
                       description: Status of the condition, one of True, False, Unknown.
                       type: string
                     type:
-                      description: Type of condition in CamelCase or in foo.example.com/CamelCase.
-                        Many .condition.type values are consistent across resources
-                        like Available, but because arbitrary conditions can be useful
-                        (see .node.status.conditions), the ability to deconflict is
-                        important.
+                      description: |-
+                        Type of condition in CamelCase or in foo.example.com/CamelCase.
+                        Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+                        can be useful (see .node.status.conditions), the ability to deconflict is important.
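To ground the MachineHealthCheck fields above, an editor's sketch (not part of the diff); the selector label and all values are hypothetical:

```yaml
spec:
  clusterName: my-cluster
  maxUnhealthy: 40%         # remediation is blocked once more than this share is unhealthy
  nodeStartupTimeout: 10m   # machines without a Node after this duration are remediated
  selector:
    matchLabels:
      cluster.x-k8s.io/deployment-name: md-0
  unhealthyConditions:      # ORed: any match marks the node unhealthy
  - type: Ready
    status: "False"
    timeout: 300s
```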
                      type: string
                   required:
                   - status
@@ -254,9 +274,9 @@ spec:
                 format: int64
                 type: integer
               remediationsAllowed:
-                description: RemediationsAllowed is the number of further remediations
-                  allowed by this machine health check before maxUnhealthy short circuiting
-                  will be applied
+                description: |-
+                  RemediationsAllowed is the number of further remediations allowed by this machine health check before
+                  maxUnhealthy short circuiting will be applied
                 format: int32
                 minimum: 0
                 type: integer
@@ -268,7 +288,7 @@ spec:
                 type: array
             type: object
         type: object
-    served: true
+    served: false
     storage: false
     subresources:
       status: {}
@@ -293,21 +313,30 @@ spec:
       jsonPath: .status.currentHealthy
       name: CurrentHealthy
       type: integer
+    deprecated: true
     name: v1alpha4
     schema:
       openAPIV3Schema:
-        description: "MachineHealthCheck is the Schema for the machinehealthchecks
-          API. \n Deprecated: This type will be removed in one of the next releases."
+        description: |-
+          MachineHealthCheck is the Schema for the machinehealthchecks API.
+
+
+          Deprecated: This type will be removed in one of the next releases.
         properties:
           apiVersion:
-            description: 'APIVersion defines the versioned schema of this representation
-              of an object. Servers should convert recognized schemas to the latest
-              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
             type: string
          kind:
-            description: 'Kind is a string value representing the REST resource this
-              object represents. Servers may infer this from the endpoint the client
-              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
             type: string
           metadata:
             type: object
@@ -323,54 +352,65 @@ spec:
                 anyOf:
                 - type: integer
                 - type: string
-                description: Any further remediation is only allowed if at most "MaxUnhealthy"
-                  machines selected by "selector" are not healthy.
+                description: |-
+                  Any further remediation is only allowed if at most "MaxUnhealthy" machines selected by
+                  "selector" are not healthy.
                 x-kubernetes-int-or-string: true
               nodeStartupTimeout:
-                description: Machines older than this duration without a node will
-                  be considered to have failed and will be remediated. If not set,
-                  this value is defaulted to 10 minutes. If you wish to disable this
-                  feature, set the value explicitly to 0.
+                description: |-
+                  Machines older than this duration without a node will be considered to have
+                  failed and will be remediated.
+                  If not set, this value is defaulted to 10 minutes.
+                  If you wish to disable this feature, set the value explicitly to 0.
                 type: string
               remediationTemplate:
-                description: "RemediationTemplate is a reference to a remediation
-                  template provided by an infrastructure provider. \n This field is
-                  completely optional, when filled, the MachineHealthCheck controller
-                  creates a new object from the template referenced and hands off
-                  remediation of the machine to a controller that lives outside of
-                  Cluster API."
+                description: |-
+                  RemediationTemplate is a reference to a remediation template
+                  provided by an infrastructure provider.
+
+
+                  This field is completely optional, when filled, the MachineHealthCheck controller
+                  creates a new object from the template referenced and hands off remediation of the machine to
+                  a controller that lives outside of Cluster API.
                 properties:
                   apiVersion:
                     description: API version of the referent.
                     type: string
                   fieldPath:
-                    description: 'If referring to a piece of an object instead of
-                      an entire object, this string should contain a valid JSON/Go
-                      field access statement, such as desiredState.manifest.containers[2].
-                      For example, if the object reference is to a container within
-                      a pod, this would take on a value like: "spec.containers{name}"
-                      (where "name" refers to the name of the container that triggered
-                      the event) or if no container name is specified "spec.containers[2]"
-                      (container with index 2 in this pod). This syntax is chosen
-                      only to have some well-defined way of referencing a part of
-                      an object. TODO: this design is not final and this field is
-                      subject to change in the future.'
+                    description: |-
+                      If referring to a piece of an object instead of an entire object, this string
+                      should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+                      For example, if the object reference is to a container within a pod, this would take on a value like:
+                      "spec.containers{name}" (where "name" refers to the name of the container that triggered
+                      the event) or if no container name is specified "spec.containers[2]" (container with
+                      index 2 in this pod). This syntax is chosen only to have some well-defined way of
+                      referencing a part of an object.
+                      TODO: this design is not final and this field is subject to change in the future.
                     type: string
                   kind:
-                    description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+                    description: |-
+                      Kind of the referent.
+                      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
                     type: string
                   name:
-                    description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+                    description: |-
+                      Name of the referent.
+                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
                     type: string
                   namespace:
-                    description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
+                    description: |-
+                      Namespace of the referent.
+                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
                     type: string
                   resourceVersion:
-                    description: 'Specific resourceVersion to which this reference
-                      is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
+                    description: |-
+                      Specific resourceVersion to which this reference is made, if any.
+                      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
                     type: string
                   uid:
-                    description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
+                    description: |-
+                      UID of the referent.
+                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
                     type: string
                 type: object
                 x-kubernetes-map-type: atomic
@@ -382,54 +422,55 @@ spec:
                     description: matchExpressions is a list of label selector requirements.
                       The requirements are ANDed.
                     items:
-                      description: A label selector requirement is a selector that
-                        contains values, a key, and an operator that relates the key
-                        and values.
+                      description: |-
+                        A label selector requirement is a selector that contains values, a key, and an operator that
+                        relates the key and values.
                       properties:
                         key:
                           description: key is the label key that the selector applies
                             to.
                           type: string
                         operator:
-                          description: operator represents a key's relationship to
-                            a set of values. Valid operators are In, NotIn, Exists
-                            and DoesNotExist.
+                          description: |-
+                            operator represents a key's relationship to a set of values.
+                            Valid operators are In, NotIn, Exists and DoesNotExist.
                           type: string
                         values:
-                          description: values is an array of string values. If the
-                            operator is In or NotIn, the values array must be non-empty.
-                            If the operator is Exists or DoesNotExist, the values
-                            array must be empty. This array is replaced during a strategic
+                          description: |-
+                            values is an array of string values. If the operator is In or NotIn,
+                            the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                            the values array must be empty. This array is replaced during a strategic
                            merge patch.
                          items:
                             type: string
                           type: array
+                          x-kubernetes-list-type: atomic
                       required:
                       - key
                       - operator
                       type: object
                     type: array
+                    x-kubernetes-list-type: atomic
                   matchLabels:
                     additionalProperties:
                       type: string
-                    description: matchLabels is a map of {key,value} pairs. A single
-                      {key,value} in the matchLabels map is equivalent to an element
-                      of matchExpressions, whose key field is "key", the operator
-                      is "In", and the values array contains only "value". The requirements
-                      are ANDed.
+                    description: |-
+                      matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                      map is equivalent to an element of matchExpressions, whose key field is "key", the
+                      operator is "In", and the values array contains only "value". The requirements are ANDed.
                     type: object
                 type: object
                 x-kubernetes-map-type: atomic
               unhealthyConditions:
-                description: UnhealthyConditions contains a list of the conditions
-                  that determine whether a node is considered unhealthy. The conditions
-                  are combined in a logical OR, i.e. if any of the conditions is met,
-                  the node is unhealthy.
+                description: |-
+                  UnhealthyConditions contains a list of the conditions that determine
+                  whether a node is considered unhealthy. The conditions are combined in a
+                  logical OR, i.e. if any of the conditions is met, the node is unhealthy.
                 items:
-                  description: UnhealthyCondition represents a Node condition type
-                    and value with a timeout specified as a duration. When the named
-                    condition has been in the given status for at least the timeout
-                    value, a node is considered unhealthy.
+                  description: |-
+                    UnhealthyCondition represents a Node condition type and value with a timeout
+                    specified as a duration. When the named condition has been in the given
+                    status for at least the timeout value, a node is considered unhealthy.
properties: status: minLength: 1 @@ -447,12 +488,12 @@ spec: minItems: 1 type: array unhealthyRange: - description: 'Any further remediation is only allowed if the number - of machines selected by "selector" as not healthy is within the - range of "UnhealthyRange". Takes precedence over MaxUnhealthy. Eg. - "[3-5]" - This means that remediation will be allowed only when: - (a) there are at least 3 unhealthy machines (and) (b) there are - at most 5 unhealthy machines' + description: |- + Any further remediation is only allowed if the number of machines selected by "selector" as not healthy + is within the range of "UnhealthyRange". Takes precedence over MaxUnhealthy. + Eg. "[3-5]" - This means that remediation will be allowed only when: + (a) there are at least 3 unhealthy machines (and) + (b) there are at most 5 unhealthy machines pattern: ^\[[0-9]+-[0-9]+\]$ type: string required: @@ -470,37 +511,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. 
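# Illustrative sketch (not part of the generated schema): how unhealthyRange,
# described above, gates remediation. The value is hypothetical.
#
# spec:
#   unhealthyRange: "[3-5]"   # must match ^\[[0-9]+-[0-9]+\]$
#
# With "[3-5]", remediation is allowed only while at least 3 and at most 5
# machines selected by the selector are unhealthy; when set, this takes
# precedence over maxUnhealthy.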
type: string required: - status @@ -525,9 +566,9 @@ spec: format: int64 type: integer remediationsAllowed: - description: RemediationsAllowed is the number of further remediations - allowed by this machine health check before maxUnhealthy short circuiting - will be applied + description: |- + RemediationsAllowed is the number of further remediations allowed by this machine health check before + maxUnhealthy short circuiting will be applied format: int32 minimum: 0 type: integer @@ -539,7 +580,7 @@ spec: type: array type: object type: object - served: true + served: false storage: false subresources: status: {} @@ -571,14 +612,19 @@ spec: API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -594,54 +640,65 @@ spec: anyOf: - type: integer - type: string - description: Any further remediation is only allowed if at most "MaxUnhealthy" - machines selected by "selector" are not healthy. + description: |- + Any further remediation is only allowed if at most "MaxUnhealthy" machines selected by + "selector" are not healthy. x-kubernetes-int-or-string: true nodeStartupTimeout: - description: Machines older than this duration without a node will - be considered to have failed and will be remediated. If not set, - this value is defaulted to 10 minutes. If you wish to disable this - feature, set the value explicitly to 0. + description: |- + Machines older than this duration without a node will be considered to have + failed and will be remediated. + If not set, this value is defaulted to 10 minutes. + If you wish to disable this feature, set the value explicitly to 0. type: string remediationTemplate: - description: "RemediationTemplate is a reference to a remediation - template provided by an infrastructure provider. \n This field is - completely optional, when filled, the MachineHealthCheck controller - creates a new object from the template referenced and hands off - remediation of the machine to a controller that lives outside of - Cluster API." + description: |- + RemediationTemplate is a reference to a remediation template + provided by an infrastructure provider. 
+ + + This field is completely optional, when filled, the MachineHealthCheck controller + creates a new object from the template referenced and hands off remediation of the machine to + a controller that lives outside of Cluster API. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead of - an entire object, this string should contain a valid JSON/Go - field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part of - an object. TODO: this design is not final and this field is - subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic @@ -653,54 +710,55 @@ spec: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic unhealthyConditions: - description: UnhealthyConditions contains a list of the conditions - that determine whether a node is considered unhealthy. The conditions - are combined in a logical OR, i.e. if any of the conditions is met, - the node is unhealthy. + description: |- + UnhealthyConditions contains a list of the conditions that determine + whether a node is considered unhealthy. The conditions are combined in a + logical OR, i.e. if any of the conditions is met, the node is unhealthy. items: - description: UnhealthyCondition represents a Node condition type - and value with a timeout specified as a duration. When the named - condition has been in the given status for at least the timeout - value, a node is considered unhealthy. + description: |- + UnhealthyCondition represents a Node condition type and value with a timeout + specified as a duration. When the named condition has been in the given + status for at least the timeout value, a node is considered unhealthy. properties: status: minLength: 1 @@ -715,21 +773,19 @@ spec: - timeout - type type: object - minItems: 1 type: array unhealthyRange: - description: 'Any further remediation is only allowed if the number - of machines selected by "selector" as not healthy is within the - range of "UnhealthyRange". Takes precedence over MaxUnhealthy. Eg. 
- "[3-5]" - This means that remediation will be allowed only when: - (a) there are at least 3 unhealthy machines (and) (b) there are - at most 5 unhealthy machines' + description: |- + Any further remediation is only allowed if the number of machines selected by "selector" as not healthy + is within the range of "UnhealthyRange". Takes precedence over MaxUnhealthy. + Eg. "[3-5]" - This means that remediation will be allowed only when: + (a) there are at least 3 unhealthy machines (and) + (b) there are at most 5 unhealthy machines pattern: ^\[[0-9]+-[0-9]+\]$ type: string required: - clusterName - selector - - unhealthyConditions type: object status: description: Most recently observed status of MachineHealthCheck resource @@ -741,37 +797,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. 
type: string required: - lastTransitionTime @@ -797,9 +853,9 @@ spec: format: int64 type: integer remediationsAllowed: - description: RemediationsAllowed is the number of further remediations - allowed by this machine health check before maxUnhealthy short circuiting - will be applied + description: |- + RemediationsAllowed is the number of further remediations allowed by this machine health check before + maxUnhealthy short circuiting will be applied format: int32 minimum: 0 type: integer diff --git a/config/crd/bases/cluster.x-k8s.io_machinepools.yaml b/config/crd/bases/cluster.x-k8s.io_machinepools.yaml index dbc8a14fb329..d38bb91a2b44 100644 --- a/config/crd/bases/cluster.x-k8s.io_machinepools.yaml +++ b/config/crd/bases/cluster.x-k8s.io_machinepools.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.15.0 name: machinepools.cluster.x-k8s.io spec: group: cluster.x-k8s.io @@ -33,21 +32,30 @@ spec: jsonPath: .spec.template.spec.version name: Version type: string + deprecated: true name: v1alpha3 schema: openAPIV3Schema: - description: "MachinePool is the Schema for the machinepools API. \n Deprecated: - This type will be removed in one of the next releases." + description: |- + MachinePool is the Schema for the machinepools API. + + + Deprecated: This type will be removed in one of the next releases. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -66,175 +74,204 @@ spec: type: string type: array minReadySeconds: - description: Minimum number of seconds for which a newly created machine - instances should be ready. Defaults to 0 (machine instance will - be considered available as soon as it is ready) + description: |- + Minimum number of seconds for which a newly created machine instances should + be ready. + Defaults to 0 (machine instance will be considered available as soon as it + is ready) format: int32 type: integer providerIDList: - description: ProviderIDList are the identification IDs of machine - instances provided by the provider. 
This field must match the provider - IDs as seen on the node objects corresponding to a machine pool's - machine instances. + description: |- + ProviderIDList are the identification IDs of machine instances provided by the provider. + This field must match the provider IDs as seen on the node objects corresponding to a machine pool's machine instances. items: type: string type: array replicas: - description: Number of desired machines. Defaults to 1. This is a - pointer to distinguish between explicit zero and not specified. + description: |- + Number of desired machines. Defaults to 1. + This is a pointer to distinguish between explicit zero and not specified. format: int32 type: integer strategy: - description: The deployment strategy to use to replace existing machine - instances with new ones. + description: |- + The deployment strategy to use to replace existing machine instances with + new ones. properties: rollingUpdate: - description: Rolling update config params. Present only if MachineDeploymentStrategyType - = RollingUpdate. + description: |- + Rolling update config params. Present only if + MachineDeploymentStrategyType = RollingUpdate. properties: maxSurge: anyOf: - type: integer - type: string - description: 'The maximum number of machines that can be scheduled - above the desired number of machines. Value can be an absolute - number (ex: 5) or a percentage of desired machines (ex: - 10%). This can not be 0 if MaxUnavailable is 0. Absolute - number is calculated from percentage by rounding up. Defaults - to 1. Example: when this is set to 30%, the new MachineSet - can be scaled up immediately when the rolling update starts, - such that the total number of old and new machines do not - exceed 130% of desired machines. Once old machines have - been killed, new MachineSet can be scaled up further, ensuring - that total number of machines running at any time during - the update is at most 130% of desired machines.' + description: |- + The maximum number of machines that can be scheduled above the + desired number of machines. + Value can be an absolute number (ex: 5) or a percentage of + desired machines (ex: 10%). + This can not be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up. + Defaults to 1. + Example: when this is set to 30%, the new MachineSet can be scaled + up immediately when the rolling update starts, such that the total + number of old and new machines do not exceed 130% of desired + machines. Once old machines have been killed, new MachineSet can + be scaled up further, ensuring that total number of machines running + at any time during the update is at most 130% of desired machines. x-kubernetes-int-or-string: true maxUnavailable: anyOf: - type: integer - type: string - description: 'The maximum number of machines that can be unavailable - during the update. Value can be an absolute number (ex: - 5) or a percentage of desired machines (ex: 10%). Absolute - number is calculated from percentage by rounding down. This - can not be 0 if MaxSurge is 0. Defaults to 0. Example: when - this is set to 30%, the old MachineSet can be scaled down - to 70% of desired machines immediately when the rolling - update starts. Once new machines are ready, old MachineSet - can be scaled down further, followed by scaling up the new - MachineSet, ensuring that the total number of machines available - at all times during the update is at least 70% of desired - machines.' 
+ description: |- + The maximum number of machines that can be unavailable during the update. + Value can be an absolute number (ex: 5) or a percentage of desired + machines (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 0. + Example: when this is set to 30%, the old MachineSet can be scaled + down to 70% of desired machines immediately when the rolling update + starts. Once new machines are ready, old MachineSet can be scaled + down further, followed by scaling up the new MachineSet, ensuring + that the total number of machines available at all times + during the update is at least 70% of desired machines. x-kubernetes-int-or-string: true type: object type: - description: Type of deployment. Currently the only supported - strategy is "RollingUpdate". Default is RollingUpdate. + description: |- + Type of deployment. Currently the only supported strategy is + "RollingUpdate". + Default is RollingUpdate. type: string type: object template: description: Template describes the machines that will be created. properties: metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key value map - stored with a resource that may be set by external tools - to store and retrieve arbitrary metadata. They are not queryable - and should be preserved when modifying objects. More info: - http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations type: object generateName: - description: "GenerateName is an optional prefix, used by - the server, to generate a unique name ONLY IF the Name field - has not been provided. If this field is used, the name returned - to the client will be different than the name passed. This - value will also be combined with a unique suffix. The provided - value has the same validation rules as the Name field, and - may be truncated by the length of the suffix required to - make the value unique on the server. \n If this field is - specified and the generated name exists, the server will - NOT return a 409 - instead, it will either return 201 Created - or 500 with Reason ServerTimeout indicating a unique name - could not be found in the time allotted, and the client - should retry (optionally after the time indicated in the - Retry-After header). \n Applied only if Name is not specified. + description: |- + GenerateName is an optional prefix, used by the server, to generate a unique + name ONLY IF the Name field has not been provided. + If this field is used, the name returned to the client will be different + than the name passed. This value will also be combined with a unique suffix. + The provided value has the same validation rules as the Name field, + and may be truncated by the length of the suffix required to make the value + unique on the server. 
+ + + If this field is specified and the generated name exists, the server will + NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + ServerTimeout indicating a unique name could not be found in the time allotted, and the client + should retry (optionally after the time indicated in the Retry-After header). + + + Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency - \n Deprecated: This field has no function and is going to - be removed in a next release." + + + Deprecated: This field has no function and is going to be removed in a next release. type: string labels: additionalProperties: type: string - description: 'Map of string keys and values that can be used - to organize and categorize (scope and select) objects. May - match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object name: - description: "Name must be unique within a namespace. Is required - when creating resources, although some resources may allow - a client to request the generation of an appropriate name - automatically. Name is primarily intended for creation idempotence - and configuration definition. Cannot be updated. More info: - http://kubernetes.io/docs/user-guide/identifiers#names \n - Deprecated: This field has no function and is going to be - removed in a next release." + description: |- + Name must be unique within a namespace. Is required when creating resources, although + some resources may allow a client to request the generation of an appropriate name + automatically. Name is primarily intended for creation idempotence and configuration + definition. + Cannot be updated. + More info: http://kubernetes.io/docs/user-guide/identifiers#names + + + Deprecated: This field has no function and is going to be removed in a next release. type: string namespace: - description: "Namespace defines the space within each name - must be unique. An empty namespace is equivalent to the - \"default\" namespace, but \"default\" is the canonical - representation. Not all objects are required to be scoped - to a namespace - the value of this field for those objects - will be empty. \n Must be a DNS_LABEL. Cannot be updated. + description: |- + Namespace defines the space within each name must be unique. An empty namespace is + equivalent to the "default" namespace, but "default" is the canonical representation. + Not all objects are required to be scoped to a namespace - the value of this field for + those objects will be empty. + + + Must be a DNS_LABEL. + Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces - \n Deprecated: This field has no function and is going to - be removed in a next release." + + + Deprecated: This field has no function and is going to be removed in a next release. type: string ownerReferences: - description: "List of objects depended by this object. If - ALL objects in the list have been deleted, this object will - be garbage collected. If this object is managed by a controller, - then an entry in this list will point to this controller, - with the controller field set to true. There cannot be more - than one managing controller. 
\n Deprecated: This field - has no function and is going to be removed in a next release." + description: |- + List of objects depended by this object. If ALL objects in the list have + been deleted, this object will be garbage collected. If this object is managed by a controller, + then an entry in this list will point to this controller, with the controller field set to true. + There cannot be more than one managing controller. + + + Deprecated: This field has no function and is going to be removed in a next release. items: - description: OwnerReference contains enough information - to let you identify an owning object. An owning object - must be in the same namespace as the dependent, or be - cluster-scoped, so there is no namespace field. + description: |- + OwnerReference contains enough information to let you identify an owning + object. An owning object must be in the same namespace as the dependent, or + be cluster-scoped, so there is no namespace field. properties: apiVersion: description: API version of the referent. type: string blockOwnerDeletion: - description: If true, AND if the owner has the "foregroundDeletion" - finalizer, then the owner cannot be deleted from the - key-value store until this reference is removed. See - https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion - for how the garbage collector interacts with this - field and enforces the foreground deletion. Defaults - to false. To set this field, a user needs "delete" - permission of the owner, otherwise 422 (Unprocessable - Entity) will be returned. + description: |- + If true, AND if the owner has the "foregroundDeletion" finalizer, then + the owner cannot be deleted from the key-value store until this + reference is removed. + See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion + for how the garbage collector interacts with this field and enforces the foreground deletion. + Defaults to false. + To set this field, a user needs "delete" permission of the owner, + otherwise 422 (Unprocessable Entity) will be returned. type: boolean controller: description: If true, this reference points to the managing controller. type: boolean kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names type: string uid: - description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids type: string required: - apiVersion @@ -246,67 +283,75 @@ spec: type: array type: object spec: - description: 'Specification of the desired behavior of the machine. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + description: |- + Specification of the desired behavior of the machine. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status properties: bootstrap: - description: Bootstrap is a reference to a local struct which - encapsulates fields to configure the Machine’s bootstrapping - mechanism. + description: |- + Bootstrap is a reference to a local struct which encapsulates + fields to configure the Machine’s bootstrapping mechanism. properties: configRef: - description: ConfigRef is a reference to a bootstrap provider-specific - resource that holds configuration details. The reference - is optional to allow users/operators to specify Bootstrap.Data - without the need of a controller. + description: |- + ConfigRef is a reference to a bootstrap provider-specific resource + that holds configuration details. The reference is optional to + allow users/operators to specify Bootstrap.Data without + the need of a controller. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object - instead of an entire object, this string should - contain a valid JSON/Go field access statement, - such as desiredState.manifest.containers[2]. For - example, if the object reference is to a container - within a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container - that triggered the event) or if no container name - is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only - to have some well-defined way of referencing a part - of an object. TODO: this design is not final and - this field is subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this - reference is made, if any. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic data: - description: "Data contains the bootstrap data, such as - cloud-init details scripts. If nil, the Machine should - remain in the Pending state. \n Deprecated: Switch to - DataSecretName." + description: |- + Data contains the bootstrap data, such as cloud-init details scripts. + If nil, the Machine should remain in the Pending state. + + + Deprecated: Switch to DataSecretName. type: string dataSecretName: - description: DataSecretName is the name of the secret - that stores the bootstrap data script. If nil, the Machine - should remain in the Pending state. + description: |- + DataSecretName is the name of the secret that stores the bootstrap data script. + If nil, the Machine should remain in the Pending state. type: string type: object clusterName: @@ -315,76 +360,78 @@ spec: minLength: 1 type: string failureDomain: - description: FailureDomain is the failure domain the machine - will be created in. Must match a key in the FailureDomains - map stored on the cluster object. + description: |- + FailureDomain is the failure domain the machine will be created in. + Must match a key in the FailureDomains map stored on the cluster object. type: string infrastructureRef: - description: InfrastructureRef is a required reference to - a custom resource offered by an infrastructure provider. + description: |- + InfrastructureRef is a required reference to a custom resource + offered by an infrastructure provider. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead - of an entire object, this string should contain a valid - JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container - within a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that - triggered the event) or if no container name is specified - "spec.containers[2]" (container with index 2 in this - pod). This syntax is chosen only to have some well-defined - way of referencing a part of an object. TODO: this design - is not final and this field is subject to change in - the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. 
+ TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic nodeDrainTimeout: - description: 'NodeDrainTimeout is the total amount of time - that the controller will spend on draining a node. The default - value is 0, meaning that the node can be drained without - any time limitations. NOTE: NodeDrainTimeout is different - from `kubectl drain --timeout`' + description: |- + NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + The default value is 0, meaning that the node can be drained without any time limitations. + NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` type: string providerID: - description: ProviderID is the identification ID of the machine - provided by the provider. This field must match the provider - ID as seen on the node object corresponding to this machine. - This field is required by higher level consumers of cluster-api. - Example use case is cluster autoscaler with cluster-api - as provider. Clean-up logic in the autoscaler compares machines - to nodes to find out machines at provider which could not - get registered as Kubernetes nodes. With cluster-api as - a generic out-of-tree provider for autoscaler, this field - is required by autoscaler to be able to have a provider - view of the list of machines. Another list of nodes is queried - from the k8s apiserver and then a comparison is done to - find out unregistered machines and are marked for delete. - This field will be set by the actuators and consumed by - higher level entities like autoscaler that will be interfacing - with cluster-api as generic provider. + description: |- + ProviderID is the identification ID of the machine provided by the provider. + This field must match the provider ID as seen on the node object corresponding to this machine. 
+ This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler + with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out + machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a + generic out-of-tree provider for autoscaler, this field is required by autoscaler to be + able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver + and then a comparison is done to find out unregistered machines and are marked for delete. + This field will be set by the actuators and consumed by higher level entities like autoscaler that will + be interfacing with cluster-api as generic provider. type: string version: - description: Version defines the desired Kubernetes version. + description: |- + Version defines the desired Kubernetes version. This field is meant to be optionally used by bootstrap providers. type: string required: @@ -415,37 +462,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. 
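# Illustrative sketch (not part of the generated schema): a skeletal
# MachinePool wiring together the spec fields described above. All names and
# the provider resource kind are hypothetical, and the deprecated v1alpha3
# shape is shown only because it is the version documented in this hunk.
#
# apiVersion: cluster.x-k8s.io/v1alpha3
# kind: MachinePool
# metadata:
#   name: example-mp
# spec:
#   clusterName: example-cluster
#   replicas: 3                       # pointer: distinguishes 0 from unset
#   template:
#     spec:
#       clusterName: example-cluster
#       version: v1.29.0              # hypothetical Kubernetes version
#       bootstrap:
#         dataSecretName: example-bootstrap-data   # or a provider configRef
#       infrastructureRef:
#         apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
#         kind: ExampleMachinePool    # hypothetical provider resource
#         name: example-imp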
type: string required: - status @@ -453,13 +500,14 @@ spec: type: object type: array failureMessage: - description: FailureMessage indicates that there is a problem reconciling - the state, and will be set to a descriptive error message. + description: |- + FailureMessage indicates that there is a problem reconciling the state, + and will be set to a descriptive error message. type: string failureReason: - description: FailureReason indicates that there is a problem reconciling - the state, and will be set to a token value suitable for programmatic - interpretation. + description: |- + FailureReason indicates that there is a problem reconciling the state, and + will be set to a token value suitable for programmatic interpretation. type: string infrastructureReady: description: InfrastructureReady is the state of the infrastructure @@ -469,65 +517,66 @@ spec: description: NodeRefs will point to the corresponding Nodes if it they exist. items: - description: "ObjectReference contains enough information to let - you inspect or modify the referred object. --- New uses of this - type are discouraged because of difficulty describing its usage - when embedded in APIs. 1. Ignored fields. It includes many fields - which are not generally honored. For instance, ResourceVersion - and FieldPath are both very rarely valid in actual usage. 2. Invalid - usage help. It is impossible to add specific help for individual - usage. In most embedded usages, there are particular restrictions - like, \"must refer only to types A and B\" or \"UID not honored\" - or \"name must be restricted\". Those cannot be well described - when embedded. 3. Inconsistent validation. Because the usages - are different, the validation rules are different by usage, which - makes it hard for users to predict what will happen. 4. The fields - are both imprecise and overly precise. Kind is not a precise - mapping to a URL. This can produce ambiguity during interpretation - and require a REST mapping. In most cases, the dependency is - on the group,resource tuple and the version of the actual struct - is irrelevant. 5. We cannot easily change it. Because this type - is embedded in many locations, updates to this type will affect - numerous schemas. Don't make new APIs embed an underspecified - API type they do not control. \n Instead of using this type, create - a locally provided and used type that is well-focused on your - reference. For example, ServiceReferences for admission registration: - https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 - ." + description: |- + ObjectReference contains enough information to let you inspect or modify the referred object. + --- + New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. + 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. + 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular + restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". + Those cannot be well described when embedded. + 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. + 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. 
This can produce ambiguity + during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple + and the version of the actual struct is irrelevant. + 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type + will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. + + + Instead of using this type, create a locally provided and used type that is well-focused on your reference. + For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead of - an entire object, this string should contain a valid JSON/Go - field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part of - an object. TODO: this design is not final and this field is - subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object + x-kubernetes-map-type: atomic type: array observedGeneration: description: ObservedGeneration is the latest generation observed @@ -535,7 +584,8 @@ spec: format: int64 type: integer phase: - description: Phase represents the current phase of cluster actuation. + description: |- + Phase represents the current phase of cluster actuation. E.g. Pending, Running, Terminating, Failed etc. type: string readyReplicas: @@ -549,17 +599,17 @@ spec: format: int32 type: integer unavailableReplicas: - description: Total number of unavailable machine instances targeted - by this machine pool. This is the total number of machine instances - that are still required for the machine pool to have 100% available - capacity. They may either be machine instances that are running - but not yet available or machine instances that still have not been - created. + description: |- + Total number of unavailable machine instances targeted by this machine pool. + This is the total number of machine instances that are still required for + the machine pool to have 100% available capacity. They may either + be machine instances that are running but not yet available or machine instances + that still have not been created. format: int32 type: integer type: object type: object - served: true + served: false storage: false subresources: scale: @@ -584,21 +634,30 @@ spec: jsonPath: .spec.template.spec.version name: Version type: string + deprecated: true name: v1alpha4 schema: openAPIV3Schema: - description: "MachinePool is the Schema for the machinepools API. \n Deprecated: - This type will be removed in one of the next releases." + description: |- + MachinePool is the Schema for the machinepools API. + + + Deprecated: This type will be removed in one of the next releases. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -617,104 +676,115 @@ spec: type: string type: array minReadySeconds: - description: Minimum number of seconds for which a newly created machine - instances should be ready. 
Defaults to 0 (machine instance will - be considered available as soon as it is ready) + description: |- + Minimum number of seconds for which a newly created machine instances should + be ready. + Defaults to 0 (machine instance will be considered available as soon as it + is ready) format: int32 type: integer providerIDList: - description: ProviderIDList are the identification IDs of machine - instances provided by the provider. This field must match the provider - IDs as seen on the node objects corresponding to a machine pool's - machine instances. + description: |- + ProviderIDList are the identification IDs of machine instances provided by the provider. + This field must match the provider IDs as seen on the node objects corresponding to a machine pool's machine instances. items: type: string type: array replicas: - description: Number of desired machines. Defaults to 1. This is a - pointer to distinguish between explicit zero and not specified. + description: |- + Number of desired machines. Defaults to 1. + This is a pointer to distinguish between explicit zero and not specified. format: int32 type: integer template: description: Template describes the machines that will be created. properties: metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key value map - stored with a resource that may be set by external tools - to store and retrieve arbitrary metadata. They are not queryable - and should be preserved when modifying objects. More info: - http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations type: object labels: additionalProperties: type: string - description: 'Map of string keys and values that can be used - to organize and categorize (scope and select) objects. May - match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object type: object spec: - description: 'Specification of the desired behavior of the machine. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + description: |- + Specification of the desired behavior of the machine. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status properties: bootstrap: - description: Bootstrap is a reference to a local struct which - encapsulates fields to configure the Machine’s bootstrapping - mechanism. + description: |- + Bootstrap is a reference to a local struct which encapsulates + fields to configure the Machine’s bootstrapping mechanism. 
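As a sketch of the two bootstrap paths this field allows — a provider-managed config resource, or a user-supplied data Secret — consider the fragment below; KubeadmConfig stands in for any bootstrap provider kind, and all names are placeholders:

    bootstrap:
      configRef:
        apiVersion: bootstrap.cluster.x-k8s.io/v1alpha4
        kind: KubeadmConfig            # any bootstrap provider resource
        name: worker-bootstrap         # placeholder name
      # alternatively, bypass the bootstrap controller and point at a Secret:
      # dataSecretName: worker-bootstrap-data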
properties: configRef: - description: ConfigRef is a reference to a bootstrap provider-specific - resource that holds configuration details. The reference - is optional to allow users/operators to specify Bootstrap.DataSecretName - without the need of a controller. + description: |- + ConfigRef is a reference to a bootstrap provider-specific resource + that holds configuration details. The reference is optional to + allow users/operators to specify Bootstrap.DataSecretName without + the need of a controller. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object - instead of an entire object, this string should - contain a valid JSON/Go field access statement, - such as desiredState.manifest.containers[2]. For - example, if the object reference is to a container - within a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container - that triggered the event) or if no container name - is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only - to have some well-defined way of referencing a part - of an object. TODO: this design is not final and - this field is subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this - reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic dataSecretName: - description: DataSecretName is the name of the secret - that stores the bootstrap data script. If nil, the Machine - should remain in the Pending state. + description: |- + DataSecretName is the name of the secret that stores the bootstrap data script. + If nil, the Machine should remain in the Pending state. type: string type: object clusterName: @@ -723,76 +793,78 @@ spec: minLength: 1 type: string failureDomain: - description: FailureDomain is the failure domain the machine - will be created in. Must match a key in the FailureDomains - map stored on the cluster object. + description: |- + FailureDomain is the failure domain the machine will be created in. + Must match a key in the FailureDomains map stored on the cluster object. type: string infrastructureRef: - description: InfrastructureRef is a required reference to - a custom resource offered by an infrastructure provider. + description: |- + InfrastructureRef is a required reference to a custom resource + offered by an infrastructure provider. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead - of an entire object, this string should contain a valid - JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container - within a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that - triggered the event) or if no container name is specified - "spec.containers[2]" (container with index 2 in this - pod). This syntax is chosen only to have some well-defined - way of referencing a part of an object. TODO: this design - is not final and this field is subject to change in - the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic nodeDrainTimeout: - description: 'NodeDrainTimeout is the total amount of time - that the controller will spend on draining a node. The default - value is 0, meaning that the node can be drained without - any time limitations. NOTE: NodeDrainTimeout is different - from `kubectl drain --timeout`' + description: |- + NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + The default value is 0, meaning that the node can be drained without any time limitations. + NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` type: string providerID: - description: ProviderID is the identification ID of the machine - provided by the provider. This field must match the provider - ID as seen on the node object corresponding to this machine. - This field is required by higher level consumers of cluster-api. - Example use case is cluster autoscaler with cluster-api - as provider. Clean-up logic in the autoscaler compares machines - to nodes to find out machines at provider which could not - get registered as Kubernetes nodes. With cluster-api as - a generic out-of-tree provider for autoscaler, this field - is required by autoscaler to be able to have a provider - view of the list of machines. Another list of nodes is queried - from the k8s apiserver and then a comparison is done to - find out unregistered machines and are marked for delete. - This field will be set by the actuators and consumed by - higher level entities like autoscaler that will be interfacing - with cluster-api as generic provider. + description: |- + ProviderID is the identification ID of the machine provided by the provider. + This field must match the provider ID as seen on the node object corresponding to this machine. + This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler + with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out + machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a + generic out-of-tree provider for autoscaler, this field is required by autoscaler to be + able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver + and then a comparison is done to find out unregistered machines and are marked for delete. + This field will be set by the actuators and consumed by higher level entities like autoscaler that will + be interfacing with cluster-api as generic provider. type: string version: - description: Version defines the desired Kubernetes version. 
+ description: |- + Version defines the desired Kubernetes version. This field is meant to be optionally used by bootstrap providers. type: string required: @@ -823,37 +895,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - status @@ -861,13 +933,14 @@ spec: type: object type: array failureMessage: - description: FailureMessage indicates that there is a problem reconciling - the state, and will be set to a descriptive error message. + description: |- + FailureMessage indicates that there is a problem reconciling the state, + and will be set to a descriptive error message. type: string failureReason: - description: FailureReason indicates that there is a problem reconciling - the state, and will be set to a token value suitable for programmatic - interpretation. + description: |- + FailureReason indicates that there is a problem reconciling the state, and + will be set to a token value suitable for programmatic interpretation. type: string infrastructureReady: description: InfrastructureReady is the state of the infrastructure @@ -877,65 +950,66 @@ spec: description: NodeRefs will point to the corresponding Nodes if it they exist. 
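The autoscaler clean-up described in the providerID documentation amounts to a set comparison between this list and the spec.providerID values of registered Nodes; schematically (provider IDs invented for illustration):

    spec:
      providerIDList:
      - aws:///us-west-2a/i-0123456789abcdef0   # a Node carries this ID: registered
      - aws:///us-west-2a/i-0fedcba9876543210   # no matching Node: unregistered, clean-up candidate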
items: - description: "ObjectReference contains enough information to let - you inspect or modify the referred object. --- New uses of this - type are discouraged because of difficulty describing its usage - when embedded in APIs. 1. Ignored fields. It includes many fields - which are not generally honored. For instance, ResourceVersion - and FieldPath are both very rarely valid in actual usage. 2. Invalid - usage help. It is impossible to add specific help for individual - usage. In most embedded usages, there are particular restrictions - like, \"must refer only to types A and B\" or \"UID not honored\" - or \"name must be restricted\". Those cannot be well described - when embedded. 3. Inconsistent validation. Because the usages - are different, the validation rules are different by usage, which - makes it hard for users to predict what will happen. 4. The fields - are both imprecise and overly precise. Kind is not a precise - mapping to a URL. This can produce ambiguity during interpretation - and require a REST mapping. In most cases, the dependency is - on the group,resource tuple and the version of the actual struct - is irrelevant. 5. We cannot easily change it. Because this type - is embedded in many locations, updates to this type will affect - numerous schemas. Don't make new APIs embed an underspecified - API type they do not control. \n Instead of using this type, create - a locally provided and used type that is well-focused on your - reference. For example, ServiceReferences for admission registration: - https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 - ." + description: |- + ObjectReference contains enough information to let you inspect or modify the referred object. + --- + New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. + 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. + 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular + restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". + Those cannot be well described when embedded. + 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. + 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity + during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple + and the version of the actual struct is irrelevant. + 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type + will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. + + + Instead of using this type, create a locally provided and used type that is well-focused on your reference. + For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead of - an entire object, this string should contain a valid JSON/Go - field access statement, such as desiredState.manifest.containers[2]. 
- For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part of - an object. TODO: this design is not final and this field is - subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object + x-kubernetes-map-type: atomic type: array observedGeneration: description: ObservedGeneration is the latest generation observed @@ -943,7 +1017,8 @@ spec: format: int64 type: integer phase: - description: Phase represents the current phase of cluster actuation. + description: |- + Phase represents the current phase of cluster actuation. E.g. Pending, Running, Terminating, Failed etc. type: string readyReplicas: @@ -957,17 +1032,17 @@ spec: format: int32 type: integer unavailableReplicas: - description: Total number of unavailable machine instances targeted - by this machine pool. This is the total number of machine instances - that are still required for the machine pool to have 100% available - capacity. 
They may either be machine instances that are running - but not yet available or machine instances that still have not been - created. + description: |- + Total number of unavailable machine instances targeted by this machine pool. + This is the total number of machine instances that are still required for + the machine pool to have 100% available capacity. They may either + be machine instances that are running but not yet available or machine instances + that still have not been created. format: int32 type: integer type: object type: object - served: true + served: false storage: false subresources: scale: @@ -1007,14 +1082,19 @@ spec: description: MachinePool is the Schema for the machinepools API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -1033,104 +1113,116 @@ spec: type: string type: array minReadySeconds: - description: Minimum number of seconds for which a newly created machine - instances should be ready. Defaults to 0 (machine instance will - be considered available as soon as it is ready) + description: |- + Minimum number of seconds for which a newly created machine instances should + be ready. + Defaults to 0 (machine instance will be considered available as soon as it + is ready) + NOTE: No logic is implemented for this field and it currently has no behaviour. format: int32 type: integer providerIDList: - description: ProviderIDList are the identification IDs of machine - instances provided by the provider. This field must match the provider - IDs as seen on the node objects corresponding to a machine pool's - machine instances. + description: |- + ProviderIDList are the identification IDs of machine instances provided by the provider. + This field must match the provider IDs as seen on the node objects corresponding to a machine pool's machine instances. items: type: string type: array replicas: - description: Number of desired machines. Defaults to 1. This is a - pointer to distinguish between explicit zero and not specified. + description: |- + Number of desired machines. Defaults to 1. + This is a pointer to distinguish between explicit zero and not specified. format: int32 type: integer template: description: Template describes the machines that will be created. 
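Tying the v1beta1 spec fields together, a minimal MachinePool could look like the sketch below; the object name, cluster name, and infrastructure kind are placeholders, not prescriptions:

    apiVersion: cluster.x-k8s.io/v1beta1
    kind: MachinePool
    metadata:
      name: worker-pool
    spec:
      clusterName: my-cluster
      replicas: 3                        # pointer field; omitted means 1
      template:
        spec:
          clusterName: my-cluster
          version: v1.29.0               # optionally consumed by the bootstrap provider
          bootstrap:
            dataSecretName: worker-bootstrap-data
          infrastructureRef:
            apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
            kind: DockerMachinePool      # placeholder infrastructure provider kind
            name: worker-pool-infra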
properties: metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key value map - stored with a resource that may be set by external tools - to store and retrieve arbitrary metadata. They are not queryable - and should be preserved when modifying objects. More info: - http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations type: object labels: additionalProperties: type: string - description: 'Map of string keys and values that can be used - to organize and categorize (scope and select) objects. May - match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object type: object spec: - description: 'Specification of the desired behavior of the machine. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + description: |- + Specification of the desired behavior of the machine. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status properties: bootstrap: - description: Bootstrap is a reference to a local struct which - encapsulates fields to configure the Machine’s bootstrapping - mechanism. + description: |- + Bootstrap is a reference to a local struct which encapsulates + fields to configure the Machine’s bootstrapping mechanism. properties: configRef: - description: ConfigRef is a reference to a bootstrap provider-specific - resource that holds configuration details. The reference - is optional to allow users/operators to specify Bootstrap.DataSecretName - without the need of a controller. + description: |- + ConfigRef is a reference to a bootstrap provider-specific resource + that holds configuration details. The reference is optional to + allow users/operators to specify Bootstrap.DataSecretName without + the need of a controller. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object - instead of an entire object, this string should - contain a valid JSON/Go field access statement, - such as desiredState.manifest.containers[2]. For - example, if the object reference is to a container - within a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container - that triggered the event) or if no container name - is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only - to have some well-defined way of referencing a part - of an object. TODO: this design is not final and - this field is subject to change in the future.' 
+ description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this - reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic dataSecretName: - description: DataSecretName is the name of the secret - that stores the bootstrap data script. If nil, the Machine - should remain in the Pending state. + description: |- + DataSecretName is the name of the secret that stores the bootstrap data script. + If nil, the Machine should remain in the Pending state. type: string type: object clusterName: @@ -1139,88 +1231,89 @@ spec: minLength: 1 type: string failureDomain: - description: FailureDomain is the failure domain the machine - will be created in. Must match a key in the FailureDomains - map stored on the cluster object. + description: |- + FailureDomain is the failure domain the machine will be created in. + Must match a key in the FailureDomains map stored on the cluster object. type: string infrastructureRef: - description: InfrastructureRef is a required reference to - a custom resource offered by an infrastructure provider. + description: |- + InfrastructureRef is a required reference to a custom resource + offered by an infrastructure provider. properties: apiVersion: description: API version of the referent. 
type: string fieldPath: - description: 'If referring to a piece of an object instead - of an entire object, this string should contain a valid - JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container - within a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that - triggered the event) or if no container name is specified - "spec.containers[2]" (container with index 2 in this - pod). This syntax is chosen only to have some well-defined - way of referencing a part of an object. TODO: this design - is not final and this field is subject to change in - the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic nodeDeletionTimeout: - description: NodeDeletionTimeout defines how long the controller - will attempt to delete the Node that the Machine hosts after - the Machine is marked for deletion. A duration of 0 will - retry deletion indefinitely. Defaults to 10 seconds. + description: |- + NodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine + hosts after the Machine is marked for deletion. 
A duration of 0 will retry deletion indefinitely. + Defaults to 10 seconds. type: string nodeDrainTimeout: - description: 'NodeDrainTimeout is the total amount of time - that the controller will spend on draining a node. The default - value is 0, meaning that the node can be drained without - any time limitations. NOTE: NodeDrainTimeout is different - from `kubectl drain --timeout`' + description: |- + NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + The default value is 0, meaning that the node can be drained without any time limitations. + NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` type: string nodeVolumeDetachTimeout: - description: NodeVolumeDetachTimeout is the total amount of - time that the controller will spend on waiting for all volumes - to be detached. The default value is 0, meaning that the - volumes can be detached without any time limitations. + description: |- + NodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. type: string providerID: - description: ProviderID is the identification ID of the machine - provided by the provider. This field must match the provider - ID as seen on the node object corresponding to this machine. - This field is required by higher level consumers of cluster-api. - Example use case is cluster autoscaler with cluster-api - as provider. Clean-up logic in the autoscaler compares machines - to nodes to find out machines at provider which could not - get registered as Kubernetes nodes. With cluster-api as - a generic out-of-tree provider for autoscaler, this field - is required by autoscaler to be able to have a provider - view of the list of machines. Another list of nodes is queried - from the k8s apiserver and then a comparison is done to - find out unregistered machines and are marked for delete. - This field will be set by the actuators and consumed by - higher level entities like autoscaler that will be interfacing - with cluster-api as generic provider. + description: |- + ProviderID is the identification ID of the machine provided by the provider. + This field must match the provider ID as seen on the node object corresponding to this machine. + This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler + with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out + machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a + generic out-of-tree provider for autoscaler, this field is required by autoscaler to be + able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver + and then a comparison is done to find out unregistered machines and are marked for delete. + This field will be set by the actuators and consumed by higher level entities like autoscaler that will + be interfacing with cluster-api as generic provider. type: string version: - description: Version defines the desired Kubernetes version. + description: |- + Version defines the desired Kubernetes version. This field is meant to be optionally used by bootstrap providers. type: string required: @@ -1251,37 +1344,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. 
This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - lastTransitionTime @@ -1290,13 +1383,14 @@ spec: type: object type: array failureMessage: - description: FailureMessage indicates that there is a problem reconciling - the state, and will be set to a descriptive error message. + description: |- + FailureMessage indicates that there is a problem reconciling the state, + and will be set to a descriptive error message. type: string failureReason: - description: FailureReason indicates that there is a problem reconciling - the state, and will be set to a token value suitable for programmatic - interpretation. + description: |- + FailureReason indicates that there is a problem reconciling the state, and + will be set to a token value suitable for programmatic interpretation. type: string infrastructureReady: description: InfrastructureReady is the state of the infrastructure @@ -1306,65 +1400,66 @@ spec: description: NodeRefs will point to the corresponding Nodes if it they exist. items: - description: "ObjectReference contains enough information to let - you inspect or modify the referred object. --- New uses of this - type are discouraged because of difficulty describing its usage - when embedded in APIs. 1. Ignored fields. It includes many fields - which are not generally honored. 
For instance, ResourceVersion - and FieldPath are both very rarely valid in actual usage. 2. Invalid - usage help. It is impossible to add specific help for individual - usage. In most embedded usages, there are particular restrictions - like, \"must refer only to types A and B\" or \"UID not honored\" - or \"name must be restricted\". Those cannot be well described - when embedded. 3. Inconsistent validation. Because the usages - are different, the validation rules are different by usage, which - makes it hard for users to predict what will happen. 4. The fields - are both imprecise and overly precise. Kind is not a precise - mapping to a URL. This can produce ambiguity during interpretation - and require a REST mapping. In most cases, the dependency is - on the group,resource tuple and the version of the actual struct - is irrelevant. 5. We cannot easily change it. Because this type - is embedded in many locations, updates to this type will affect - numerous schemas. Don't make new APIs embed an underspecified - API type they do not control. \n Instead of using this type, create - a locally provided and used type that is well-focused on your - reference. For example, ServiceReferences for admission registration: - https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 - ." + description: |- + ObjectReference contains enough information to let you inspect or modify the referred object. + --- + New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. + 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. + 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular + restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". + Those cannot be well described when embedded. + 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. + 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity + during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple + and the version of the actual struct is irrelevant. + 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type + will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. + + + Instead of using this type, create a locally provided and used type that is well-focused on your reference. + For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead of - an entire object, this string should contain a valid JSON/Go - field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). 
This syntax is chosen - only to have some well-defined way of referencing a part of - an object. TODO: this design is not final and this field is - subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object + x-kubernetes-map-type: atomic type: array observedGeneration: description: ObservedGeneration is the latest generation observed @@ -1372,7 +1467,8 @@ spec: format: int64 type: integer phase: - description: Phase represents the current phase of cluster actuation. + description: |- + Phase represents the current phase of cluster actuation. E.g. Pending, Running, Terminating, Failed etc. type: string readyReplicas: @@ -1386,12 +1482,12 @@ spec: format: int32 type: integer unavailableReplicas: - description: Total number of unavailable machine instances targeted - by this machine pool. This is the total number of machine instances - that are still required for the machine pool to have 100% available - capacity. They may either be machine instances that are running - but not yet available or machine instances that still have not been - created. + description: |- + Total number of unavailable machine instances targeted by this machine pool. 
+ This is the total number of machine instances that are still required for + the machine pool to have 100% available capacity. They may either + be machine instances that are running but not yet available or machine instances + that still have not been created. format: int32 type: integer type: object diff --git a/config/crd/bases/cluster.x-k8s.io_machines.yaml b/config/crd/bases/cluster.x-k8s.io_machines.yaml index 10ff7ad3d820..1b4b5ced3252 100644 --- a/config/crd/bases/cluster.x-k8s.io_machines.yaml +++ b/config/crd/bases/cluster.x-k8s.io_machines.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.15.0 name: machines.cluster.x-k8s.io spec: group: cluster.x-k8s.io @@ -37,21 +36,30 @@ spec: name: NodeName priority: 1 type: string + deprecated: true name: v1alpha3 schema: openAPIV3Schema: - description: "Machine is the Schema for the machines API. \n Deprecated: This - type will be removed in one of the next releases." + description: |- + Machine is the Schema for the machines API. + + + Deprecated: This type will be removed in one of the next releases. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -59,58 +67,70 @@ spec: description: MachineSpec defines the desired state of Machine. properties: bootstrap: - description: Bootstrap is a reference to a local struct which encapsulates + description: |- + Bootstrap is a reference to a local struct which encapsulates fields to configure the Machine’s bootstrapping mechanism. properties: configRef: - description: ConfigRef is a reference to a bootstrap provider-specific - resource that holds configuration details. The reference is - optional to allow users/operators to specify Bootstrap.Data - without the need of a controller. + description: |- + ConfigRef is a reference to a bootstrap provider-specific resource + that holds configuration details. The reference is optional to + allow users/operators to specify Bootstrap.Data without + the need of a controller. properties: apiVersion: description: API version of the referent. 
type: string fieldPath: - description: 'If referring to a piece of an object instead - of an entire object, this string should contain a valid - JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part - of an object. TODO: this design is not final and this field - is subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic data: - description: "Data contains the bootstrap data, such as cloud-init - details scripts. If nil, the Machine should remain in the Pending - state. \n Deprecated: Switch to DataSecretName." + description: |- + Data contains the bootstrap data, such as cloud-init details scripts. + If nil, the Machine should remain in the Pending state. + + + Deprecated: Switch to DataSecretName. 
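The deprecation note above (Bootstrap.Data → DataSecretName) implies the bootstrap contract: the named Secret, not the Machine object, carries the bootstrap payload. A minimal sketch of such a Secret, assuming the conventional `value`/`format` keys and the `cluster.x-k8s.io/secret` type; all names are hypothetical, so verify the exact contract against your Cluster API version:

```yaml
# Sketch of a bootstrap data Secret referenced via spec.bootstrap.dataSecretName.
apiVersion: v1
kind: Secret
metadata:
  name: worker-0-bootstrap     # hypothetical; would match dataSecretName
  namespace: default
type: cluster.x-k8s.io/secret
stringData:
  format: cloud-config         # or "ignition", depending on the bootstrap provider
  value: |
    #cloud-config
    runcmd:
      - echo "bootstrap placeholder"
```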
type: string dataSecretName: - description: DataSecretName is the name of the secret that stores - the bootstrap data script. If nil, the Machine should remain - in the Pending state. + description: |- + DataSecretName is the name of the secret that stores the bootstrap data script. + If nil, the Machine should remain in the Pending state. type: string type: object clusterName: @@ -119,73 +139,79 @@ spec: minLength: 1 type: string failureDomain: - description: FailureDomain is the failure domain the machine will - be created in. Must match a key in the FailureDomains map stored - on the cluster object. + description: |- + FailureDomain is the failure domain the machine will be created in. + Must match a key in the FailureDomains map stored on the cluster object. type: string infrastructureRef: - description: InfrastructureRef is a required reference to a custom - resource offered by an infrastructure provider. + description: |- + InfrastructureRef is a required reference to a custom resource + offered by an infrastructure provider. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead of - an entire object, this string should contain a valid JSON/Go - field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part of - an object. TODO: this design is not final and this field is - subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic nodeDrainTimeout: - description: 'NodeDrainTimeout is the total amount of time that the - controller will spend on draining a node. The default value is 0, - meaning that the node can be drained without any time limitations. - NOTE: NodeDrainTimeout is different from `kubectl drain --timeout`' + description: |- + NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + The default value is 0, meaning that the node can be drained without any time limitations. + NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` type: string providerID: - description: ProviderID is the identification ID of the machine provided - by the provider. This field must match the provider ID as seen on - the node object corresponding to this machine. This field is required - by higher level consumers of cluster-api. Example use case is cluster - autoscaler with cluster-api as provider. Clean-up logic in the autoscaler - compares machines to nodes to find out machines at provider which - could not get registered as Kubernetes nodes. With cluster-api as - a generic out-of-tree provider for autoscaler, this field is required - by autoscaler to be able to have a provider view of the list of - machines. Another list of nodes is queried from the k8s apiserver - and then a comparison is done to find out unregistered machines - and are marked for delete. This field will be set by the actuators - and consumed by higher level entities like autoscaler that will + description: |- + ProviderID is the identification ID of the machine provided by the provider. + This field must match the provider ID as seen on the node object corresponding to this machine. + This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler + with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out + machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a + generic out-of-tree provider for autoscaler, this field is required by autoscaler to be + able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver + and then a comparison is done to find out unregistered machines and are marked for delete. + This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider. type: string version: - description: Version defines the desired Kubernetes version. This - field is meant to be optionally used by bootstrap providers. + description: |- + Version defines the desired Kubernetes version. + This field is meant to be optionally used by bootstrap providers. 
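The providerID description above is easier to follow with concrete objects side by side: the Machine's spec.providerID must equal the spec.providerID on the Node it backs, which is how consumers like the cluster autoscaler correlate the two lists and spot unregistered machines. A hedged sketch (the aws:/// format and all names are illustrative; required fields such as bootstrap and infrastructureRef are omitted for brevity):

```yaml
apiVersion: cluster.x-k8s.io/v1beta1
kind: Machine
metadata:
  name: worker-0                 # hypothetical
spec:
  clusterName: my-cluster        # hypothetical
  providerID: aws:///us-east-1a/i-0123456789abcdef0
---
apiVersion: v1
kind: Node
metadata:
  name: ip-10-0-0-1.ec2.internal # hypothetical
spec:
  providerID: aws:///us-east-1a/i-0123456789abcdef0  # must match the Machine
```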
type: string required: - bootstrap @@ -196,7 +222,8 @@ spec: description: MachineStatus defines the observed state of Machine. properties: addresses: - description: Addresses is a list of addresses assigned to the machine. + description: |- + Addresses is a list of addresses assigned to the machine. This field is copied from the infrastructure provider reference. items: description: MachineAddress contains information for the node's @@ -224,37 +251,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - status @@ -262,34 +289,46 @@ spec: type: object type: array failureMessage: - description: "FailureMessage will be set in the event that there is - a terminal problem reconciling the Machine and will contain a more - verbose string suitable for logging and human consumption. \n This - field should not be set for transitive errors that a controller - faces that are expected to be fixed automatically over time (like - service outages), but instead indicate that something is fundamentally - wrong with the Machine's spec or the configuration of the controller, - and that manual intervention is required. 
Examples of terminal errors - would be invalid combinations of settings in the spec, values that - are unsupported by the controller, or the responsible controller - itself being critically misconfigured. \n Any transient errors that - occur during the reconciliation of Machines can be added as events - to the Machine object and/or logged in the controller's output." + description: |- + FailureMessage will be set in the event that there is a terminal problem + reconciling the Machine and will contain a more verbose string suitable + for logging and human consumption. + + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Machine's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of Machines + can be added as events to the Machine object and/or logged in the + controller's output. type: string failureReason: - description: "FailureReason will be set in the event that there is - a terminal problem reconciling the Machine and will contain a succinct - value suitable for machine interpretation. \n This field should - not be set for transitive errors that a controller faces that are - expected to be fixed automatically over time (like service outages), - but instead indicate that something is fundamentally wrong with - the Machine's spec or the configuration of the controller, and that - manual intervention is required. Examples of terminal errors would - be invalid combinations of settings in the spec, values that are - unsupported by the controller, or the responsible controller itself - being critically misconfigured. \n Any transient errors that occur - during the reconciliation of Machines can be added as events to - the Machine object and/or logged in the controller's output." + description: |- + FailureReason will be set in the event that there is a terminal problem + reconciling the Machine and will contain a succinct value suitable + for machine interpretation. + + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Machine's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of Machines + can be added as events to the Machine object and/or logged in the + controller's output. type: string infrastructureReady: description: InfrastructureReady is the state of the infrastructure @@ -307,33 +346,40 @@ spec: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead of - an entire object, this string should contain a valid JSON/Go - field access statement, such as desiredState.manifest.containers[2]. 
- For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part of - an object. TODO: this design is not final and this field is - subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic @@ -343,18 +389,20 @@ spec: format: int64 type: integer phase: - description: Phase represents the current phase of machine actuation. + description: |- + Phase represents the current phase of machine actuation. E.g. Pending, Running, Terminating, Failed etc. type: string version: - description: Version specifies the current version of Kubernetes running + description: |- + Version specifies the current version of Kubernetes running on the corresponding Node. This is meant to be a means of bubbling - up status from the Node to the Machine. It is entirely optional, - but useful for end-user UX if it’s present. + up status from the Node to the Machine. 
+ It is entirely optional, but useful for end-user UX if it’s present. type: string type: object type: object - served: true + served: false storage: false subresources: status: {} @@ -384,21 +432,30 @@ spec: name: NodeName priority: 1 type: string + deprecated: true name: v1alpha4 schema: openAPIV3Schema: - description: "Machine is the Schema for the machines API. \n Deprecated: This - type will be removed in one of the next releases." + description: |- + Machine is the Schema for the machines API. + + + Deprecated: This type will be removed in one of the next releases. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -406,53 +463,62 @@ spec: description: MachineSpec defines the desired state of Machine. properties: bootstrap: - description: Bootstrap is a reference to a local struct which encapsulates + description: |- + Bootstrap is a reference to a local struct which encapsulates fields to configure the Machine’s bootstrapping mechanism. properties: configRef: - description: ConfigRef is a reference to a bootstrap provider-specific - resource that holds configuration details. The reference is - optional to allow users/operators to specify Bootstrap.DataSecretName - without the need of a controller. + description: |- + ConfigRef is a reference to a bootstrap provider-specific resource + that holds configuration details. The reference is optional to + allow users/operators to specify Bootstrap.DataSecretName without + the need of a controller. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead - of an entire object, this string should contain a valid - JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part - of an object. TODO: this design is not final and this field - is subject to change in the future.' 
+ description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic dataSecretName: - description: DataSecretName is the name of the secret that stores - the bootstrap data script. If nil, the Machine should remain - in the Pending state. + description: |- + DataSecretName is the name of the secret that stores the bootstrap data script. + If nil, the Machine should remain in the Pending state. type: string type: object clusterName: @@ -461,73 +527,79 @@ spec: minLength: 1 type: string failureDomain: - description: FailureDomain is the failure domain the machine will - be created in. Must match a key in the FailureDomains map stored - on the cluster object. + description: |- + FailureDomain is the failure domain the machine will be created in. + Must match a key in the FailureDomains map stored on the cluster object. type: string infrastructureRef: - description: InfrastructureRef is a required reference to a custom - resource offered by an infrastructure provider. + description: |- + InfrastructureRef is a required reference to a custom resource + offered by an infrastructure provider. properties: apiVersion: description: API version of the referent. 
type: string fieldPath: - description: 'If referring to a piece of an object instead of - an entire object, this string should contain a valid JSON/Go - field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part of - an object. TODO: this design is not final and this field is - subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic nodeDrainTimeout: - description: 'NodeDrainTimeout is the total amount of time that the - controller will spend on draining a node. The default value is 0, - meaning that the node can be drained without any time limitations. - NOTE: NodeDrainTimeout is different from `kubectl drain --timeout`' + description: |- + NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. 
+ The default value is 0, meaning that the node can be drained without any time limitations. + NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` type: string providerID: - description: ProviderID is the identification ID of the machine provided - by the provider. This field must match the provider ID as seen on - the node object corresponding to this machine. This field is required - by higher level consumers of cluster-api. Example use case is cluster - autoscaler with cluster-api as provider. Clean-up logic in the autoscaler - compares machines to nodes to find out machines at provider which - could not get registered as Kubernetes nodes. With cluster-api as - a generic out-of-tree provider for autoscaler, this field is required - by autoscaler to be able to have a provider view of the list of - machines. Another list of nodes is queried from the k8s apiserver - and then a comparison is done to find out unregistered machines - and are marked for delete. This field will be set by the actuators - and consumed by higher level entities like autoscaler that will + description: |- + ProviderID is the identification ID of the machine provided by the provider. + This field must match the provider ID as seen on the node object corresponding to this machine. + This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler + with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out + machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a + generic out-of-tree provider for autoscaler, this field is required by autoscaler to be + able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver + and then a comparison is done to find out unregistered machines and are marked for delete. + This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider. type: string version: - description: Version defines the desired Kubernetes version. This - field is meant to be optionally used by bootstrap providers. + description: |- + Version defines the desired Kubernetes version. + This field is meant to be optionally used by bootstrap providers. type: string required: - bootstrap @@ -538,7 +610,8 @@ spec: description: MachineStatus defines the observed state of Machine. properties: addresses: - description: Addresses is a list of addresses assigned to the machine. + description: |- + Addresses is a list of addresses assigned to the machine. This field is copied from the infrastructure provider reference. items: description: MachineAddress contains information for the node's @@ -566,37 +639,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. 
+ This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - status @@ -604,34 +677,46 @@ spec: type: object type: array failureMessage: - description: "FailureMessage will be set in the event that there is - a terminal problem reconciling the Machine and will contain a more - verbose string suitable for logging and human consumption. \n This - field should not be set for transitive errors that a controller - faces that are expected to be fixed automatically over time (like - service outages), but instead indicate that something is fundamentally - wrong with the Machine's spec or the configuration of the controller, - and that manual intervention is required. Examples of terminal errors - would be invalid combinations of settings in the spec, values that - are unsupported by the controller, or the responsible controller - itself being critically misconfigured. \n Any transient errors that - occur during the reconciliation of Machines can be added as events - to the Machine object and/or logged in the controller's output." + description: |- + FailureMessage will be set in the event that there is a terminal problem + reconciling the Machine and will contain a more verbose string suitable + for logging and human consumption. + + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Machine's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of Machines + can be added as events to the Machine object and/or logged in the + controller's output. 
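The failureReason/failureMessage descriptions above draw a terminal-versus-transient line: terminal problems that need manual intervention land in these fields, while transient ones surface as events or condition messages. A sketch of how that division might look on a Machine status (the values are invented for illustration; InvalidConfiguration and InfrastructureReady are patterned on names used elsewhere in Cluster API):

```yaml
status:
  phase: Failed
  # Terminal: manual intervention required, so surfaced in failureReason/Message.
  failureReason: InvalidConfiguration
  failureMessage: "spec.version is unsupported by the responsible controller"
  # Transient: surfaced as a condition instead, per the guidance above.
  conditions:
  - type: InfrastructureReady
    status: "False"
    severity: Warning            # Severity MUST be set only when Status=False
    reason: WaitingForInfrastructure
    message: "infrastructure provider has not yet reported ready"
    lastTransitionTime: "2024-01-01T00:00:00Z"
```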
type: string failureReason: - description: "FailureReason will be set in the event that there is - a terminal problem reconciling the Machine and will contain a succinct - value suitable for machine interpretation. \n This field should - not be set for transitive errors that a controller faces that are - expected to be fixed automatically over time (like service outages), - but instead indicate that something is fundamentally wrong with - the Machine's spec or the configuration of the controller, and that - manual intervention is required. Examples of terminal errors would - be invalid combinations of settings in the spec, values that are - unsupported by the controller, or the responsible controller itself - being critically misconfigured. \n Any transient errors that occur - during the reconciliation of Machines can be added as events to - the Machine object and/or logged in the controller's output." + description: |- + FailureReason will be set in the event that there is a terminal problem + reconciling the Machine and will contain a succinct value suitable + for machine interpretation. + + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Machine's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of Machines + can be added as events to the Machine object and/or logged in the + controller's output. type: string infrastructureReady: description: InfrastructureReady is the state of the infrastructure @@ -643,8 +728,9 @@ spec: format: date-time type: string nodeInfo: - description: 'NodeInfo is a set of ids/uuids to uniquely identify - the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#info' + description: |- + NodeInfo is a set of ids/uuids to uniquely identify the node. + More info: https://kubernetes.io/docs/concepts/nodes/node/#info properties: architecture: description: The Architecture reported by the node @@ -667,9 +753,10 @@ spec: description: Kubelet Version reported by the node. type: string machineID: - description: 'MachineID reported by the node. For unique machine - identification in the cluster this field is preferred. Learn - more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html' + description: |- + MachineID reported by the node. For unique machine identification + in the cluster this field is preferred. Learn more from man(5) + machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html type: string operatingSystem: description: The Operating System reported by the node @@ -679,9 +766,10 @@ spec: (e.g. Debian GNU/Linux 7 (wheezy)). type: string systemUUID: - description: SystemUUID reported by the node. For unique machine - identification MachineID is preferred. This field is specific - to Red Hat hosts https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid + description: |- + SystemUUID reported by the node. For unique machine identification + MachineID is preferred. 
This field is specific to Red Hat hosts + https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid type: string required: - architecture @@ -702,33 +790,40 @@ spec: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead of - an entire object, this string should contain a valid JSON/Go - field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part of - an object. TODO: this design is not final and this field is - subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic @@ -738,18 +833,20 @@ spec: format: int64 type: integer phase: - description: Phase represents the current phase of machine actuation. + description: |- + Phase represents the current phase of machine actuation. E.g. 
Pending, Running, Terminating, Failed etc. type: string version: - description: Version specifies the current version of Kubernetes running + description: |- + Version specifies the current version of Kubernetes running on the corresponding Node. This is meant to be a means of bubbling - up status from the Node to the Machine. It is entirely optional, - but useful for end-user UX if it’s present. + up status from the Node to the Machine. + It is entirely optional, but useful for end-user UX if it’s present. type: string type: object type: object - served: true + served: false storage: false subresources: status: {} @@ -784,14 +881,19 @@ spec: description: Machine is the Schema for the machines API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -799,53 +901,62 @@ spec: description: MachineSpec defines the desired state of Machine. properties: bootstrap: - description: Bootstrap is a reference to a local struct which encapsulates + description: |- + Bootstrap is a reference to a local struct which encapsulates fields to configure the Machine’s bootstrapping mechanism. properties: configRef: - description: ConfigRef is a reference to a bootstrap provider-specific - resource that holds configuration details. The reference is - optional to allow users/operators to specify Bootstrap.DataSecretName - without the need of a controller. + description: |- + ConfigRef is a reference to a bootstrap provider-specific resource + that holds configuration details. The reference is optional to + allow users/operators to specify Bootstrap.DataSecretName without + the need of a controller. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead - of an entire object, this string should contain a valid - JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). 
This syntax is chosen - only to have some well-defined way of referencing a part - of an object. TODO: this design is not final and this field - is subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic dataSecretName: - description: DataSecretName is the name of the secret that stores - the bootstrap data script. If nil, the Machine should remain - in the Pending state. + description: |- + DataSecretName is the name of the secret that stores the bootstrap data script. + If nil, the Machine should remain in the Pending state. type: string type: object clusterName: @@ -854,85 +965,90 @@ spec: minLength: 1 type: string failureDomain: - description: FailureDomain is the failure domain the machine will - be created in. Must match a key in the FailureDomains map stored - on the cluster object. + description: |- + FailureDomain is the failure domain the machine will be created in. + Must match a key in the FailureDomains map stored on the cluster object. type: string infrastructureRef: - description: InfrastructureRef is a required reference to a custom - resource offered by an infrastructure provider. 
+ description: |- + InfrastructureRef is a required reference to a custom resource + offered by an infrastructure provider. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead of - an entire object, this string should contain a valid JSON/Go - field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part of - an object. TODO: this design is not final and this field is - subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic nodeDeletionTimeout: - description: NodeDeletionTimeout defines how long the controller will - attempt to delete the Node that the Machine hosts after the Machine - is marked for deletion. A duration of 0 will retry deletion indefinitely. 
+ description: |- + NodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine + hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds. type: string nodeDrainTimeout: - description: 'NodeDrainTimeout is the total amount of time that the - controller will spend on draining a node. The default value is 0, - meaning that the node can be drained without any time limitations. - NOTE: NodeDrainTimeout is different from `kubectl drain --timeout`' + description: |- + NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + The default value is 0, meaning that the node can be drained without any time limitations. + NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` type: string nodeVolumeDetachTimeout: - description: NodeVolumeDetachTimeout is the total amount of time that - the controller will spend on waiting for all volumes to be detached. - The default value is 0, meaning that the volumes can be detached - without any time limitations. + description: |- + NodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. type: string providerID: - description: ProviderID is the identification ID of the machine provided - by the provider. This field must match the provider ID as seen on - the node object corresponding to this machine. This field is required - by higher level consumers of cluster-api. Example use case is cluster - autoscaler with cluster-api as provider. Clean-up logic in the autoscaler - compares machines to nodes to find out machines at provider which - could not get registered as Kubernetes nodes. With cluster-api as - a generic out-of-tree provider for autoscaler, this field is required - by autoscaler to be able to have a provider view of the list of - machines. Another list of nodes is queried from the k8s apiserver - and then a comparison is done to find out unregistered machines - and are marked for delete. This field will be set by the actuators - and consumed by higher level entities like autoscaler that will + description: |- + ProviderID is the identification ID of the machine provided by the provider. + This field must match the provider ID as seen on the node object corresponding to this machine. + This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler + with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out + machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a + generic out-of-tree provider for autoscaler, this field is required by autoscaler to be + able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver + and then a comparison is done to find out unregistered machines and are marked for delete. + This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider. type: string version: - description: Version defines the desired Kubernetes version. This - field is meant to be optionally used by bootstrap providers. + description: |- + Version defines the desired Kubernetes version. + This field is meant to be optionally used by bootstrap providers. 
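Note (illustrative, not part of the diff): taken together, the Machine spec fields whose descriptions are rewrapped above (bootstrap.dataSecretName, infrastructureRef, failureDomain, the node*Timeout fields, providerID, version) compose roughly as in the following sketch. The object names, the DockerMachine kind, and the v1beta1 apiVersion are assumptions for the example, not taken from this diff.

# Hypothetical Machine manifest; names and kinds are illustrative only.
apiVersion: cluster.x-k8s.io/v1beta1
kind: Machine
metadata:
  name: controlplane-0
  namespace: default
spec:
  clusterName: my-cluster            # required, minLength: 1
  version: v1.28.2                   # optional; meant to be used by bootstrap providers
  failureDomain: us-east-1a          # must match a key in the Cluster's FailureDomains map
  bootstrap:
    dataSecretName: controlplane-0-bootstrap   # Machine stays Pending while this is nil
  infrastructureRef:                 # required reference to an infrastructure provider resource
    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
    kind: DockerMachine
    name: controlplane-0
  nodeDrainTimeout: 5m               # 0 means the node may be drained without a time limit
  nodeDeletionTimeout: 10s           # defaults to 10 seconds
  nodeVolumeDetachTimeout: 5m        # 0 means volumes may detach without a time limit
  # providerID is normally populated by the infrastructure provider (actuator),
  # not by users; it must match the provider ID on the corresponding Node.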
type: string required: - bootstrap @@ -943,7 +1059,8 @@ spec: description: MachineStatus defines the observed state of Machine. properties: addresses: - description: Addresses is a list of addresses assigned to the machine. + description: |- + Addresses is a list of addresses assigned to the machine. This field is copied from the infrastructure provider reference. items: description: MachineAddress contains information for the node's @@ -965,8 +1082,9 @@ spec: description: BootstrapReady is the state of the bootstrap provider. type: boolean certificatesExpiryDate: - description: CertificatesExpiryDate is the expiry date of the machine - certificates. This value is only set for control plane machines. + description: |- + CertificatesExpiryDate is the expiry date of the machine certificates. + This value is only set for control plane machines. format: date-time type: string conditions: @@ -976,37 +1094,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. 
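Note (illustrative, not part of the diff): each entry in status.conditions follows the schema rewrapped above. A populated condition might look like the sketch below; the type, reason, and message values are hypothetical.

# Hypothetical status.conditions entry under the schema above.
conditions:
- type: InfrastructureReady          # CamelCase, or foo.example.com/CamelCase
  status: "False"                    # one of True, False, Unknown
  severity: Warning                  # MUST be set only when status is False
  reason: WaitingForInfrastructure   # CamelCase reason for the last transition
  message: Infrastructure provider is still provisioning the machine.
  lastTransitionTime: "2024-01-01T00:00:00Z"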
type: string required: - lastTransitionTime @@ -1015,34 +1133,46 @@ spec: type: object type: array failureMessage: - description: "FailureMessage will be set in the event that there is - a terminal problem reconciling the Machine and will contain a more - verbose string suitable for logging and human consumption. \n This - field should not be set for transitive errors that a controller - faces that are expected to be fixed automatically over time (like - service outages), but instead indicate that something is fundamentally - wrong with the Machine's spec or the configuration of the controller, - and that manual intervention is required. Examples of terminal errors - would be invalid combinations of settings in the spec, values that - are unsupported by the controller, or the responsible controller - itself being critically misconfigured. \n Any transient errors that - occur during the reconciliation of Machines can be added as events - to the Machine object and/or logged in the controller's output." + description: |- + FailureMessage will be set in the event that there is a terminal problem + reconciling the Machine and will contain a more verbose string suitable + for logging and human consumption. + + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Machine's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of Machines + can be added as events to the Machine object and/or logged in the + controller's output. type: string failureReason: - description: "FailureReason will be set in the event that there is - a terminal problem reconciling the Machine and will contain a succinct - value suitable for machine interpretation. \n This field should - not be set for transitive errors that a controller faces that are - expected to be fixed automatically over time (like service outages), - but instead indicate that something is fundamentally wrong with - the Machine's spec or the configuration of the controller, and that - manual intervention is required. Examples of terminal errors would - be invalid combinations of settings in the spec, values that are - unsupported by the controller, or the responsible controller itself - being critically misconfigured. \n Any transient errors that occur - during the reconciliation of Machines can be added as events to - the Machine object and/or logged in the controller's output." + description: |- + FailureReason will be set in the event that there is a terminal problem + reconciling the Machine and will contain a succinct value suitable + for machine interpretation. + + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Machine's spec or the configuration of + the controller, and that manual intervention is required. 
Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of Machines + can be added as events to the Machine object and/or logged in the + controller's output. type: string infrastructureReady: description: InfrastructureReady is the state of the infrastructure @@ -1054,8 +1184,9 @@ spec: format: date-time type: string nodeInfo: - description: 'NodeInfo is a set of ids/uuids to uniquely identify - the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#info' + description: |- + NodeInfo is a set of ids/uuids to uniquely identify the node. + More info: https://kubernetes.io/docs/concepts/nodes/node/#info properties: architecture: description: The Architecture reported by the node @@ -1078,9 +1209,10 @@ spec: description: Kubelet Version reported by the node. type: string machineID: - description: 'MachineID reported by the node. For unique machine - identification in the cluster this field is preferred. Learn - more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html' + description: |- + MachineID reported by the node. For unique machine identification + in the cluster this field is preferred. Learn more from man(5) + machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html type: string operatingSystem: description: The Operating System reported by the node @@ -1090,9 +1222,10 @@ spec: (e.g. Debian GNU/Linux 7 (wheezy)). type: string systemUUID: - description: SystemUUID reported by the node. For unique machine - identification MachineID is preferred. This field is specific - to Red Hat hosts https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid + description: |- + SystemUUID reported by the node. For unique machine identification + MachineID is preferred. This field is specific to Red Hat hosts + https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid type: string required: - architecture @@ -1113,33 +1246,40 @@ spec: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead of - an entire object, this string should contain a valid JSON/Go - field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part of - an object. TODO: this design is not final and this field is - subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. 
+ TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic @@ -1149,7 +1289,8 @@ spec: format: int64 type: integer phase: - description: Phase represents the current phase of machine actuation. + description: |- + Phase represents the current phase of machine actuation. E.g. Pending, Running, Terminating, Failed etc. type: string type: object diff --git a/config/crd/bases/cluster.x-k8s.io_machinesets.yaml b/config/crd/bases/cluster.x-k8s.io_machinesets.yaml index fb8ee2fd691e..68f0bfbc2c7a 100644 --- a/config/crd/bases/cluster.x-k8s.io_machinesets.yaml +++ b/config/crd/bases/cluster.x-k8s.io_machinesets.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.15.0 name: machinesets.cluster.x-k8s.io spec: group: cluster.x-k8s.io @@ -32,21 +31,30 @@ spec: jsonPath: .status.readyReplicas name: Ready type: integer + deprecated: true name: v1alpha3 schema: openAPIV3Schema: - description: "MachineSet is the Schema for the machinesets API. \n Deprecated: - This type will be removed in one of the next releases." + description: |- + MachineSet is the Schema for the machinesets API. + + + Deprecated: This type will be removed in one of the next releases. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -59,179 +67,203 @@ spec: minLength: 1 type: string deletePolicy: - description: DeletePolicy defines the policy used to identify nodes - to delete when downscaling. Defaults to "Random". Valid values - are "Random, "Newest", "Oldest" + description: |- + DeletePolicy defines the policy used to identify nodes to delete when downscaling. + Defaults to "Random". Valid values are "Random, "Newest", "Oldest" enum: - Random - Newest - Oldest type: string minReadySeconds: - description: MinReadySeconds is the minimum number of seconds for - which a newly created machine should be ready. Defaults to 0 (machine - will be considered available as soon as it is ready) + description: |- + MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready. + Defaults to 0 (machine will be considered available as soon as it is ready) format: int32 type: integer replicas: - description: Replicas is the number of desired replicas. This is a - pointer to distinguish between explicit zero and unspecified. Defaults - to 1. + description: |- + Replicas is the number of desired replicas. + This is a pointer to distinguish between explicit zero and unspecified. + Defaults to 1. format: int32 type: integer selector: - description: 'Selector is a label query over machines that should - match the replica count. Label keys and values that must match in - order to be controlled by this MachineSet. It must match the machine - template''s labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors' + description: |- + Selector is a label query over machines that should match the replica count. + Label keys and values that must match in order to be controlled by this MachineSet. + It must match the machine template's labels. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. 
+ description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic template: - description: Template is the object that describes the machine that - will be created if insufficient replicas are detected. Object references - to custom resources are treated as templates. + description: |- + Template is the object that describes the machine that will be created if + insufficient replicas are detected. + Object references to custom resources are treated as templates. properties: metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key value map - stored with a resource that may be set by external tools - to store and retrieve arbitrary metadata. They are not queryable - and should be preserved when modifying objects. More info: - http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations type: object generateName: - description: "GenerateName is an optional prefix, used by - the server, to generate a unique name ONLY IF the Name field - has not been provided. If this field is used, the name returned - to the client will be different than the name passed. This - value will also be combined with a unique suffix. The provided - value has the same validation rules as the Name field, and - may be truncated by the length of the suffix required to - make the value unique on the server. 
\n If this field is - specified and the generated name exists, the server will - NOT return a 409 - instead, it will either return 201 Created - or 500 with Reason ServerTimeout indicating a unique name - could not be found in the time allotted, and the client - should retry (optionally after the time indicated in the - Retry-After header). \n Applied only if Name is not specified. + description: |- + GenerateName is an optional prefix, used by the server, to generate a unique + name ONLY IF the Name field has not been provided. + If this field is used, the name returned to the client will be different + than the name passed. This value will also be combined with a unique suffix. + The provided value has the same validation rules as the Name field, + and may be truncated by the length of the suffix required to make the value + unique on the server. + + + If this field is specified and the generated name exists, the server will + NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + ServerTimeout indicating a unique name could not be found in the time allotted, and the client + should retry (optionally after the time indicated in the Retry-After header). + + + Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency - \n Deprecated: This field has no function and is going to - be removed in a next release." + + + Deprecated: This field has no function and is going to be removed in a next release. type: string labels: additionalProperties: type: string - description: 'Map of string keys and values that can be used - to organize and categorize (scope and select) objects. May - match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object name: - description: "Name must be unique within a namespace. Is required - when creating resources, although some resources may allow - a client to request the generation of an appropriate name - automatically. Name is primarily intended for creation idempotence - and configuration definition. Cannot be updated. More info: - http://kubernetes.io/docs/user-guide/identifiers#names \n - Deprecated: This field has no function and is going to be - removed in a next release." + description: |- + Name must be unique within a namespace. Is required when creating resources, although + some resources may allow a client to request the generation of an appropriate name + automatically. Name is primarily intended for creation idempotence and configuration + definition. + Cannot be updated. + More info: http://kubernetes.io/docs/user-guide/identifiers#names + + + Deprecated: This field has no function and is going to be removed in a next release. type: string namespace: - description: "Namespace defines the space within each name - must be unique. An empty namespace is equivalent to the - \"default\" namespace, but \"default\" is the canonical - representation. Not all objects are required to be scoped - to a namespace - the value of this field for those objects - will be empty. \n Must be a DNS_LABEL. Cannot be updated. + description: |- + Namespace defines the space within each name must be unique. 
An empty namespace is + equivalent to the "default" namespace, but "default" is the canonical representation. + Not all objects are required to be scoped to a namespace - the value of this field for + those objects will be empty. + + + Must be a DNS_LABEL. + Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces - \n Deprecated: This field has no function and is going to - be removed in a next release." + + + Deprecated: This field has no function and is going to be removed in a next release. type: string ownerReferences: - description: "List of objects depended by this object. If - ALL objects in the list have been deleted, this object will - be garbage collected. If this object is managed by a controller, - then an entry in this list will point to this controller, - with the controller field set to true. There cannot be more - than one managing controller. \n Deprecated: This field - has no function and is going to be removed in a next release." + description: |- + List of objects depended by this object. If ALL objects in the list have + been deleted, this object will be garbage collected. If this object is managed by a controller, + then an entry in this list will point to this controller, with the controller field set to true. + There cannot be more than one managing controller. + + + Deprecated: This field has no function and is going to be removed in a next release. items: - description: OwnerReference contains enough information - to let you identify an owning object. An owning object - must be in the same namespace as the dependent, or be - cluster-scoped, so there is no namespace field. + description: |- + OwnerReference contains enough information to let you identify an owning + object. An owning object must be in the same namespace as the dependent, or + be cluster-scoped, so there is no namespace field. properties: apiVersion: description: API version of the referent. type: string blockOwnerDeletion: - description: If true, AND if the owner has the "foregroundDeletion" - finalizer, then the owner cannot be deleted from the - key-value store until this reference is removed. See - https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion - for how the garbage collector interacts with this - field and enforces the foreground deletion. Defaults - to false. To set this field, a user needs "delete" - permission of the owner, otherwise 422 (Unprocessable - Entity) will be returned. + description: |- + If true, AND if the owner has the "foregroundDeletion" finalizer, then + the owner cannot be deleted from the key-value store until this + reference is removed. + See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion + for how the garbage collector interacts with this field and enforces the foreground deletion. + Defaults to false. + To set this field, a user needs "delete" permission of the owner, + otherwise 422 (Unprocessable Entity) will be returned. type: boolean controller: description: If true, this reference points to the managing controller. type: boolean kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names type: string uid: - description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids type: string required: - apiVersion @@ -243,67 +275,75 @@ spec: type: array type: object spec: - description: 'Specification of the desired behavior of the machine. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + description: |- + Specification of the desired behavior of the machine. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status properties: bootstrap: - description: Bootstrap is a reference to a local struct which - encapsulates fields to configure the Machine’s bootstrapping - mechanism. + description: |- + Bootstrap is a reference to a local struct which encapsulates + fields to configure the Machine’s bootstrapping mechanism. properties: configRef: - description: ConfigRef is a reference to a bootstrap provider-specific - resource that holds configuration details. The reference - is optional to allow users/operators to specify Bootstrap.Data - without the need of a controller. + description: |- + ConfigRef is a reference to a bootstrap provider-specific resource + that holds configuration details. The reference is optional to + allow users/operators to specify Bootstrap.Data without + the need of a controller. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object - instead of an entire object, this string should - contain a valid JSON/Go field access statement, - such as desiredState.manifest.containers[2]. For - example, if the object reference is to a container - within a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container - that triggered the event) or if no container name - is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only - to have some well-defined way of referencing a part - of an object. TODO: this design is not final and - this field is subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this - reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic data: - description: "Data contains the bootstrap data, such as - cloud-init details scripts. If nil, the Machine should - remain in the Pending state. \n Deprecated: Switch to - DataSecretName." + description: |- + Data contains the bootstrap data, such as cloud-init details scripts. + If nil, the Machine should remain in the Pending state. + + + Deprecated: Switch to DataSecretName. type: string dataSecretName: - description: DataSecretName is the name of the secret - that stores the bootstrap data script. If nil, the Machine - should remain in the Pending state. + description: |- + DataSecretName is the name of the secret that stores the bootstrap data script. + If nil, the Machine should remain in the Pending state. type: string type: object clusterName: @@ -312,76 +352,78 @@ spec: minLength: 1 type: string failureDomain: - description: FailureDomain is the failure domain the machine - will be created in. Must match a key in the FailureDomains - map stored on the cluster object. + description: |- + FailureDomain is the failure domain the machine will be created in. + Must match a key in the FailureDomains map stored on the cluster object. type: string infrastructureRef: - description: InfrastructureRef is a required reference to - a custom resource offered by an infrastructure provider. + description: |- + InfrastructureRef is a required reference to a custom resource + offered by an infrastructure provider. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead - of an entire object, this string should contain a valid - JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container - within a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that - triggered the event) or if no container name is specified - "spec.containers[2]" (container with index 2 in this - pod). 
This syntax is chosen only to have some well-defined - way of referencing a part of an object. TODO: this design - is not final and this field is subject to change in - the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic nodeDrainTimeout: - description: 'NodeDrainTimeout is the total amount of time - that the controller will spend on draining a node. The default - value is 0, meaning that the node can be drained without - any time limitations. NOTE: NodeDrainTimeout is different - from `kubectl drain --timeout`' + description: |- + NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + The default value is 0, meaning that the node can be drained without any time limitations. + NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` type: string providerID: - description: ProviderID is the identification ID of the machine - provided by the provider. This field must match the provider - ID as seen on the node object corresponding to this machine. - This field is required by higher level consumers of cluster-api. - Example use case is cluster autoscaler with cluster-api - as provider. 
Clean-up logic in the autoscaler compares machines - to nodes to find out machines at provider which could not - get registered as Kubernetes nodes. With cluster-api as - a generic out-of-tree provider for autoscaler, this field - is required by autoscaler to be able to have a provider - view of the list of machines. Another list of nodes is queried - from the k8s apiserver and then a comparison is done to - find out unregistered machines and are marked for delete. - This field will be set by the actuators and consumed by - higher level entities like autoscaler that will be interfacing - with cluster-api as generic provider. + description: |- + ProviderID is the identification ID of the machine provided by the provider. + This field must match the provider ID as seen on the node object corresponding to this machine. + This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler + with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out + machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a + generic out-of-tree provider for autoscaler, this field is required by autoscaler to be + able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver + and then a comparison is done to find out unregistered machines and are marked for delete. + This field will be set by the actuators and consumed by higher level entities like autoscaler that will + be interfacing with cluster-api as generic provider. type: string version: - description: Version defines the desired Kubernetes version. + description: |- + Version defines the desired Kubernetes version. This field is meant to be optionally used by bootstrap providers. type: string required: @@ -405,22 +447,27 @@ spec: failureMessage: type: string failureReason: - description: "In the event that there is a terminal problem reconciling - the replicas, both FailureReason and FailureMessage will be set. - FailureReason will be populated with a succinct value suitable for - machine interpretation, while FailureMessage will contain a more - verbose string suitable for logging and human consumption. \n These - fields should not be set for transitive errors that a controller - faces that are expected to be fixed automatically over time (like - service outages), but instead indicate that something is fundamentally - wrong with the MachineTemplate's spec or the configuration of the - machine controller, and that manual intervention is required. Examples - of terminal errors would be invalid combinations of settings in - the spec, values that are unsupported by the machine controller, - or the responsible machine controller itself being critically misconfigured. - \n Any transient errors that occur during the reconciliation of - Machines can be added as events to the MachineSet object and/or - logged in the controller's output." + description: |- + In the event that there is a terminal problem reconciling the + replicas, both FailureReason and FailureMessage will be set. FailureReason + will be populated with a succinct value suitable for machine + interpretation, while FailureMessage will contain a more verbose + string suitable for logging and human consumption. 
+ + + These fields should not be set for transitive errors that a + controller faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the MachineTemplate's spec or the configuration of + the machine controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the machine controller, or the + responsible machine controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of Machines + can be added as events to the MachineSet object and/or logged in the + controller's output. type: string fullyLabeledReplicas: description: The number of replicas that have labels matching the @@ -442,14 +489,14 @@ spec: format: int32 type: integer selector: - description: 'Selector is the same as the label selector but in the - string format to avoid introspection by clients. The string will - be in the same format as the query-param syntax. More info about - label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors' + description: |- + Selector is the same as the label selector but in the string format to avoid introspection + by clients. The string will be in the same format as the query-param syntax. + More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors type: string type: object type: object - served: true + served: false storage: false subresources: scale: @@ -478,21 +525,30 @@ spec: jsonPath: .status.readyReplicas name: Ready type: integer + deprecated: true name: v1alpha4 schema: openAPIV3Schema: - description: "MachineSet is the Schema for the machinesets API. \n Deprecated: - This type will be removed in one of the next releases." + description: |- + MachineSet is the Schema for the machinesets API. + + + Deprecated: This type will be removed in one of the next releases. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -505,157 +561,170 @@ spec: minLength: 1 type: string deletePolicy: - description: DeletePolicy defines the policy used to identify nodes - to delete when downscaling. Defaults to "Random". 
Valid values - are "Random, "Newest", "Oldest" + description: |- + DeletePolicy defines the policy used to identify nodes to delete when downscaling. + Defaults to "Random". Valid values are "Random, "Newest", "Oldest" enum: - Random - Newest - Oldest type: string minReadySeconds: - description: MinReadySeconds is the minimum number of seconds for - which a newly created machine should be ready. Defaults to 0 (machine - will be considered available as soon as it is ready) + description: |- + MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready. + Defaults to 0 (machine will be considered available as soon as it is ready) format: int32 type: integer replicas: default: 1 - description: Replicas is the number of desired replicas. This is a - pointer to distinguish between explicit zero and unspecified. Defaults - to 1. + description: |- + Replicas is the number of desired replicas. + This is a pointer to distinguish between explicit zero and unspecified. + Defaults to 1. format: int32 type: integer selector: - description: 'Selector is a label query over machines that should - match the replica count. Label keys and values that must match in - order to be controlled by this MachineSet. It must match the machine - template''s labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors' + description: |- + Selector is a label query over machines that should match the replica count. + Label keys and values that must match in order to be controlled by this MachineSet. + It must match the machine template's labels. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. 
+ description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic template: - description: Template is the object that describes the machine that - will be created if insufficient replicas are detected. Object references - to custom resources are treated as templates. + description: |- + Template is the object that describes the machine that will be created if + insufficient replicas are detected. + Object references to custom resources are treated as templates. properties: metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key value map - stored with a resource that may be set by external tools - to store and retrieve arbitrary metadata. They are not queryable - and should be preserved when modifying objects. More info: - http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations type: object labels: additionalProperties: type: string - description: 'Map of string keys and values that can be used - to organize and categorize (scope and select) objects. May - match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object type: object spec: - description: 'Specification of the desired behavior of the machine. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + description: |- + Specification of the desired behavior of the machine. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status properties: bootstrap: - description: Bootstrap is a reference to a local struct which - encapsulates fields to configure the Machine’s bootstrapping - mechanism. + description: |- + Bootstrap is a reference to a local struct which encapsulates + fields to configure the Machine’s bootstrapping mechanism. properties: configRef: - description: ConfigRef is a reference to a bootstrap provider-specific - resource that holds configuration details. The reference - is optional to allow users/operators to specify Bootstrap.DataSecretName - without the need of a controller. + description: |- + ConfigRef is a reference to a bootstrap provider-specific resource + that holds configuration details. The reference is optional to + allow users/operators to specify Bootstrap.DataSecretName without + the need of a controller. properties: apiVersion: description: API version of the referent. 
type: string fieldPath: - description: 'If referring to a piece of an object - instead of an entire object, this string should - contain a valid JSON/Go field access statement, - such as desiredState.manifest.containers[2]. For - example, if the object reference is to a container - within a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container - that triggered the event) or if no container name - is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only - to have some well-defined way of referencing a part - of an object. TODO: this design is not final and - this field is subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this - reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic dataSecretName: - description: DataSecretName is the name of the secret - that stores the bootstrap data script. If nil, the Machine - should remain in the Pending state. + description: |- + DataSecretName is the name of the secret that stores the bootstrap data script. + If nil, the Machine should remain in the Pending state. 
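Note (illustrative, not part of the diff): for the v1alpha4 MachineSet schema above (marked deprecated in this diff), a minimal manifest could look like the following sketch. It shows how spec.selector must match spec.template.metadata.labels, per the selector description; all names and the referenced template kinds are hypothetical.

# Hypothetical v1alpha4 MachineSet; names and kinds are illustrative only.
apiVersion: cluster.x-k8s.io/v1alpha4
kind: MachineSet
metadata:
  name: workers
spec:
  clusterName: my-cluster
  replicas: 3                        # pointer field; defaults to 1 when unset
  deletePolicy: Oldest               # Random (default), Newest, or Oldest
  selector:
    matchLabels:
      machineset: workers            # must match the template labels below
  template:
    metadata:
      labels:
        machineset: workers
    spec:
      clusterName: my-cluster
      bootstrap:
        configRef:                   # optional; dataSecretName may be used instead
          apiVersion: bootstrap.cluster.x-k8s.io/v1alpha4
          kind: KubeadmConfigTemplate
          name: workers-bootstrap
      infrastructureRef:             # required infrastructure provider reference
        apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
        kind: DockerMachineTemplate
        name: workers-infra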
type: string type: object clusterName: @@ -664,76 +733,78 @@ spec: minLength: 1 type: string failureDomain: - description: FailureDomain is the failure domain the machine - will be created in. Must match a key in the FailureDomains - map stored on the cluster object. + description: |- + FailureDomain is the failure domain the machine will be created in. + Must match a key in the FailureDomains map stored on the cluster object. type: string infrastructureRef: - description: InfrastructureRef is a required reference to - a custom resource offered by an infrastructure provider. + description: |- + InfrastructureRef is a required reference to a custom resource + offered by an infrastructure provider. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead - of an entire object, this string should contain a valid - JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container - within a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that - triggered the event) or if no container name is specified - "spec.containers[2]" (container with index 2 in this - pod). This syntax is chosen only to have some well-defined - way of referencing a part of an object. TODO: this design - is not final and this field is subject to change in - the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic nodeDrainTimeout: - description: 'NodeDrainTimeout is the total amount of time - that the controller will spend on draining a node. The default - value is 0, meaning that the node can be drained without - any time limitations. NOTE: NodeDrainTimeout is different - from `kubectl drain --timeout`' + description: |- + NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + The default value is 0, meaning that the node can be drained without any time limitations. + NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` type: string providerID: - description: ProviderID is the identification ID of the machine - provided by the provider. This field must match the provider - ID as seen on the node object corresponding to this machine. - This field is required by higher level consumers of cluster-api. - Example use case is cluster autoscaler with cluster-api - as provider. Clean-up logic in the autoscaler compares machines - to nodes to find out machines at provider which could not - get registered as Kubernetes nodes. With cluster-api as - a generic out-of-tree provider for autoscaler, this field - is required by autoscaler to be able to have a provider - view of the list of machines. Another list of nodes is queried - from the k8s apiserver and then a comparison is done to - find out unregistered machines and are marked for delete. - This field will be set by the actuators and consumed by - higher level entities like autoscaler that will be interfacing - with cluster-api as generic provider. + description: |- + ProviderID is the identification ID of the machine provided by the provider. + This field must match the provider ID as seen on the node object corresponding to this machine. + This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler + with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out + machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a + generic out-of-tree provider for autoscaler, this field is required by autoscaler to be + able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver + and then a comparison is done to find out unregistered machines and are marked for delete. + This field will be set by the actuators and consumed by higher level entities like autoscaler that will + be interfacing with cluster-api as generic provider. type: string version: - description: Version defines the desired Kubernetes version. + description: |- + Version defines the desired Kubernetes version. This field is meant to be optionally used by bootstrap providers. type: string required: @@ -761,37 +832,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. 
+ description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - status @@ -801,22 +872,27 @@ spec: failureMessage: type: string failureReason: - description: "In the event that there is a terminal problem reconciling - the replicas, both FailureReason and FailureMessage will be set. - FailureReason will be populated with a succinct value suitable for - machine interpretation, while FailureMessage will contain a more - verbose string suitable for logging and human consumption. \n These - fields should not be set for transitive errors that a controller - faces that are expected to be fixed automatically over time (like - service outages), but instead indicate that something is fundamentally - wrong with the MachineTemplate's spec or the configuration of the - machine controller, and that manual intervention is required. Examples - of terminal errors would be invalid combinations of settings in - the spec, values that are unsupported by the machine controller, - or the responsible machine controller itself being critically misconfigured. - \n Any transient errors that occur during the reconciliation of - Machines can be added as events to the MachineSet object and/or - logged in the controller's output." + description: |- + In the event that there is a terminal problem reconciling the + replicas, both FailureReason and FailureMessage will be set. 
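Taken together, the condition fields above combine like this on a live object; the type, reason, and message values below are hypothetical:

```yaml
status:
  conditions:
  - type: Ready                                  # CamelCase or foo.example.com/CamelCase
    status: "False"                              # one of True, False, Unknown
    severity: Warning                            # only permitted when status is False
    reason: MachinesNotReady                     # succinct, machine-readable, CamelCase
    message: 2 of 3 replicas are not yet ready   # human-readable; may be empty
    lastTransitionTime: "2024-01-01T00:00:00Z"   # when the underlying condition changed
```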
FailureReason + will be populated with a succinct value suitable for machine + interpretation, while FailureMessage will contain a more verbose + string suitable for logging and human consumption. + + + These fields should not be set for transitive errors that a + controller faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the MachineTemplate's spec or the configuration of + the machine controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the machine controller, or the + responsible machine controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of Machines + can be added as events to the MachineSet object and/or logged in the + controller's output. type: string fullyLabeledReplicas: description: The number of replicas that have labels matching the @@ -838,14 +914,14 @@ spec: format: int32 type: integer selector: - description: 'Selector is the same as the label selector but in the - string format to avoid introspection by clients. The string will - be in the same format as the query-param syntax. More info about - label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors' + description: |- + Selector is the same as the label selector but in the string format to avoid introspection + by clients. The string will be in the same format as the query-param syntax. + More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors type: string type: object type: object - served: true + served: false storage: false subresources: scale: @@ -889,14 +965,19 @@ spec: description: MachineSet is the Schema for the machinesets API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -909,157 +990,184 @@ spec: minLength: 1 type: string deletePolicy: - description: DeletePolicy defines the policy used to identify nodes - to delete when downscaling. Defaults to "Random". Valid values - are "Random, "Newest", "Oldest" + description: |- + DeletePolicy defines the policy used to identify nodes to delete when downscaling. 
+ Defaults to "Random". Valid values are "Random, "Newest", "Oldest" enum: - Random - Newest - Oldest type: string minReadySeconds: - description: MinReadySeconds is the minimum number of seconds for - which a newly created machine should be ready. Defaults to 0 (machine - will be considered available as soon as it is ready) + description: |- + MinReadySeconds is the minimum number of seconds for which a Node for a newly created machine should be ready before considering the replica available. + Defaults to 0 (machine will be considered available as soon as the Node is ready) format: int32 type: integer replicas: - default: 1 - description: Replicas is the number of desired replicas. This is a - pointer to distinguish between explicit zero and unspecified. Defaults - to 1. + description: |- + Replicas is the number of desired replicas. + This is a pointer to distinguish between explicit zero and unspecified. + + + Defaults to: + * if the Kubernetes autoscaler min size and max size annotations are set: + - if it's a new MachineSet, use min size + - if the replicas field of the old MachineSet is < min size, use min size + - if the replicas field of the old MachineSet is > max size, use max size + - if the replicas field of the old MachineSet is in the (min size, max size) range, keep the value from the oldMS + * otherwise use 1 + Note: Defaulting will be run whenever the replicas field is not set: + * A new MachineSet is created with replicas not set. + * On an existing MachineSet the replicas field was first set and is now unset. + Those cases are especially relevant for the following Kubernetes autoscaler use cases: + * A new MachineSet is created and replicas should be managed by the autoscaler + * An existing MachineSet which initially wasn't controlled by the autoscaler + should be later controlled by the autoscaler format: int32 type: integer selector: - description: 'Selector is a label query over machines that should - match the replica count. Label keys and values that must match in - order to be controlled by this MachineSet. It must match the machine - template''s labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors' + description: |- + Selector is a label query over machines that should match the replica count. + Label keys and values that must match in order to be controlled by this MachineSet. + It must match the machine template's labels. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. 
- If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic template: - description: Template is the object that describes the machine that - will be created if insufficient replicas are detected. Object references - to custom resources are treated as templates. + description: |- + Template is the object that describes the machine that will be created if + insufficient replicas are detected. + Object references to custom resources are treated as templates. properties: metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key value map - stored with a resource that may be set by external tools - to store and retrieve arbitrary metadata. They are not queryable - and should be preserved when modifying objects. More info: - http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations type: object labels: additionalProperties: type: string - description: 'Map of string keys and values that can be used - to organize and categorize (scope and select) objects. May - match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object type: object spec: - description: 'Specification of the desired behavior of the machine. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + description: |- + Specification of the desired behavior of the machine. 
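The requirement that the selector "must match the machine template's labels" means the two stanzas have to agree, as in this minimal sketch with an illustrative label:

```yaml
spec:
  selector:
    matchLabels:
      app: web          # label query over machines
  template:
    metadata:
      labels:
        app: web        # must match the selector above
```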
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status properties: bootstrap: - description: Bootstrap is a reference to a local struct which - encapsulates fields to configure the Machine’s bootstrapping - mechanism. + description: |- + Bootstrap is a reference to a local struct which encapsulates + fields to configure the Machine’s bootstrapping mechanism. properties: configRef: - description: ConfigRef is a reference to a bootstrap provider-specific - resource that holds configuration details. The reference - is optional to allow users/operators to specify Bootstrap.DataSecretName - without the need of a controller. + description: |- + ConfigRef is a reference to a bootstrap provider-specific resource + that holds configuration details. The reference is optional to + allow users/operators to specify Bootstrap.DataSecretName without + the need of a controller. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object - instead of an entire object, this string should - contain a valid JSON/Go field access statement, - such as desiredState.manifest.containers[2]. For - example, if the object reference is to a container - within a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container - that triggered the event) or if no container name - is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only - to have some well-defined way of referencing a part - of an object. TODO: this design is not final and - this field is subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this - reference is made, if any. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic dataSecretName: - description: DataSecretName is the name of the secret - that stores the bootstrap data script. If nil, the Machine - should remain in the Pending state. + description: |- + DataSecretName is the name of the secret that stores the bootstrap data script. + If nil, the Machine should remain in the Pending state. type: string type: object clusterName: @@ -1068,88 +1176,89 @@ spec: minLength: 1 type: string failureDomain: - description: FailureDomain is the failure domain the machine - will be created in. Must match a key in the FailureDomains - map stored on the cluster object. + description: |- + FailureDomain is the failure domain the machine will be created in. + Must match a key in the FailureDomains map stored on the cluster object. type: string infrastructureRef: - description: InfrastructureRef is a required reference to - a custom resource offered by an infrastructure provider. + description: |- + InfrastructureRef is a required reference to a custom resource + offered by an infrastructure provider. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead - of an entire object, this string should contain a valid - JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container - within a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that - triggered the event) or if no container name is specified - "spec.containers[2]" (container with index 2 in this - pod). This syntax is chosen only to have some well-defined - way of referencing a part of an object. TODO: this design - is not final and this field is subject to change in - the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic nodeDeletionTimeout: - description: NodeDeletionTimeout defines how long the controller - will attempt to delete the Node that the Machine hosts after - the Machine is marked for deletion. A duration of 0 will - retry deletion indefinitely. Defaults to 10 seconds. + description: |- + NodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine + hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. + Defaults to 10 seconds. type: string nodeDrainTimeout: - description: 'NodeDrainTimeout is the total amount of time - that the controller will spend on draining a node. The default - value is 0, meaning that the node can be drained without - any time limitations. NOTE: NodeDrainTimeout is different - from `kubectl drain --timeout`' + description: |- + NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + The default value is 0, meaning that the node can be drained without any time limitations. + NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` type: string nodeVolumeDetachTimeout: - description: NodeVolumeDetachTimeout is the total amount of - time that the controller will spend on waiting for all volumes - to be detached. The default value is 0, meaning that the - volumes can be detached without any time limitations. + description: |- + NodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. type: string providerID: - description: ProviderID is the identification ID of the machine - provided by the provider. This field must match the provider - ID as seen on the node object corresponding to this machine. - This field is required by higher level consumers of cluster-api. - Example use case is cluster autoscaler with cluster-api - as provider. 
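The three node-lifecycle timeouts described here are plain duration strings. A sketch of plausible values inside a machine template spec (the durations are arbitrary examples):

```yaml
spec:
  template:
    spec:
      nodeDrainTimeout: 10m        # cap draining; 0 (the default) means no time limit
      nodeVolumeDetachTimeout: 5m  # cap waiting for volume detach; 0 means no time limit
      nodeDeletionTimeout: 10s     # window for deleting the Node object; 0 retries forever
```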
Clean-up logic in the autoscaler compares machines - to nodes to find out machines at provider which could not - get registered as Kubernetes nodes. With cluster-api as - a generic out-of-tree provider for autoscaler, this field - is required by autoscaler to be able to have a provider - view of the list of machines. Another list of nodes is queried - from the k8s apiserver and then a comparison is done to - find out unregistered machines and are marked for delete. - This field will be set by the actuators and consumed by - higher level entities like autoscaler that will be interfacing - with cluster-api as generic provider. + description: |- + ProviderID is the identification ID of the machine provided by the provider. + This field must match the provider ID as seen on the node object corresponding to this machine. + This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler + with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out + machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a + generic out-of-tree provider for autoscaler, this field is required by autoscaler to be + able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver + and then a comparison is done to find out unregistered machines and are marked for delete. + This field will be set by the actuators and consumed by higher level entities like autoscaler that will + be interfacing with cluster-api as generic provider. type: string version: - description: Version defines the desired Kubernetes version. + description: |- + Version defines the desired Kubernetes version. This field is meant to be optionally used by bootstrap providers. type: string required: @@ -1177,37 +1286,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. 
type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - lastTransitionTime @@ -1218,22 +1327,27 @@ spec: failureMessage: type: string failureReason: - description: "In the event that there is a terminal problem reconciling - the replicas, both FailureReason and FailureMessage will be set. - FailureReason will be populated with a succinct value suitable for - machine interpretation, while FailureMessage will contain a more - verbose string suitable for logging and human consumption. \n These - fields should not be set for transitive errors that a controller - faces that are expected to be fixed automatically over time (like - service outages), but instead indicate that something is fundamentally - wrong with the MachineTemplate's spec or the configuration of the - machine controller, and that manual intervention is required. Examples - of terminal errors would be invalid combinations of settings in - the spec, values that are unsupported by the machine controller, - or the responsible machine controller itself being critically misconfigured. - \n Any transient errors that occur during the reconciliation of - Machines can be added as events to the MachineSet object and/or - logged in the controller's output." + description: |- + In the event that there is a terminal problem reconciling the + replicas, both FailureReason and FailureMessage will be set. FailureReason + will be populated with a succinct value suitable for machine + interpretation, while FailureMessage will contain a more verbose + string suitable for logging and human consumption. + + + These fields should not be set for transitive errors that a + controller faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the MachineTemplate's spec or the configuration of + the machine controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the machine controller, or the + responsible machine controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of Machines + can be added as events to the MachineSet object and/or logged in the + controller's output. type: string fullyLabeledReplicas: description: The number of replicas that have labels matching the @@ -1255,10 +1369,10 @@ spec: format: int32 type: integer selector: - description: 'Selector is the same as the label selector but in the - string format to avoid introspection by clients. The string will - be in the same format as the query-param syntax. More info about - label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors' + description: |- + Selector is the same as the label selector but in the string format to avoid introspection + by clients. 
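Per the description above, the failure pair is reserved for terminal problems that need manual intervention; transient errors belong in events or logs instead. A hypothetical example of how the two fields divide the work:

```yaml
status:
  failureReason: InvalidConfiguration   # succinct, machine-readable (hypothetical value)
  failureMessage: >-                    # verbose, for humans and logs
    MachineTemplate requests an instance type the machine controller
    does not support; manual intervention is required.
```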
The string will be in the same format as the query-param syntax. + More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors type: string type: object type: object diff --git a/config/crd/bases/ipam.cluster.x-k8s.io_ipaddressclaims.yaml b/config/crd/bases/ipam.cluster.x-k8s.io_ipaddressclaims.yaml index 9746140c46c3..8d750247755e 100644 --- a/config/crd/bases/ipam.cluster.x-k8s.io_ipaddressclaims.yaml +++ b/config/crd/bases/ipam.cluster.x-k8s.io_ipaddressclaims.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.15.0 name: ipaddressclaims.ipam.cluster.x-k8s.io spec: group: ipam.cluster.x-k8s.io @@ -26,20 +25,29 @@ spec: jsonPath: .spec.poolRef.kind name: Pool Kind type: string + - description: Time duration since creation of IPAddressClaim + jsonPath: .metadata.creationTimestamp + name: Age + type: date name: v1alpha1 schema: openAPIV3Schema: description: IPAddressClaim is the Schema for the ipaddressclaim API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -51,10 +59,10 @@ spec: should be created. properties: apiGroup: - description: APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in - the core API group. For any other third-party types, APIGroup - is required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of resource being referenced type: string name: description: Name is the name of resource being referenced type: string required: - - apiGroup - kind - name type: object + x-kubernetes-map-type: atomic required: - poolRef type: object @@ -78,10 +86,18 @@ spec: for this claim. properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?'
+ default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object + x-kubernetes-map-type: atomic conditions: description: Conditions summarises the current state of the IPAddressClaim items: @@ -89,37 +105,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. 
type: string required: - lastTransitionTime @@ -127,8 +143,143 @@ spec: - type type: object type: array + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: Name of the pool to allocate an address from + jsonPath: .spec.poolRef.name + name: Pool Name + type: string + - description: Kind of the pool to allocate an address from + jsonPath: .spec.poolRef.kind + name: Pool Kind + type: string + - description: Time duration since creation of IPAddressClaim + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: IPAddressClaim is the Schema for the ipaddressclaim API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IPAddressClaimSpec is the desired state of an IPAddressClaim. + properties: + clusterName: + description: ClusterName is the name of the Cluster this object belongs + to. + type: string + poolRef: + description: PoolRef is a reference to the pool from which an IP address + should be created. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic required: - - addressRef + - poolRef + type: object + status: + description: IPAddressClaimStatus is the observed status of an IPAddressClaim. + properties: + addressRef: + description: AddressRef is a reference to the address that was created + for this claim. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. + type: string + type: object + x-kubernetes-map-type: atomic + conditions: + description: Conditions summarises the current state of the IPAddressClaim + items: + description: Condition defines an observation of a Cluster API resource + operational state. + properties: + lastTransitionTime: + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed.
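Once an address has been allocated, the claim's status carries the back-reference and conditions just defined; the values here are hypothetical:

```yaml
status:
  addressRef:
    name: my-machine-ip-address   # the IPAddress created to satisfy this claim
  conditions:
  - type: Ready                   # hypothetical condition type
    status: "True"
    lastTransitionTime: "2024-01-01T00:00:00Z"
```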
If that is not known, then using the time when + the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + A human readable message indicating details about the transition. + This field may be empty. + type: string + reason: + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. + type: string + severity: + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array type: object type: object served: true diff --git a/config/crd/bases/ipam.cluster.x-k8s.io_ipaddresses.yaml b/config/crd/bases/ipam.cluster.x-k8s.io_ipaddresses.yaml index c5af26e2f876..01e75ad0797b 100644 --- a/config/crd/bases/ipam.cluster.x-k8s.io_ipaddresses.yaml +++ b/config/crd/bases/ipam.cluster.x-k8s.io_ipaddresses.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.15.0 name: ipaddresses.ipam.cluster.x-k8s.io spec: group: ipam.cluster.x-k8s.io @@ -30,20 +29,29 @@ spec: jsonPath: .spec.poolRef.kind name: Pool Kind type: string + - description: Time duration since creation of IPAddress + jsonPath: .metadata.creationTimestamp + name: Age + type: date name: v1alpha1 schema: openAPIV3Schema: description: IPAddress is the Schema for the ipaddress API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -58,10 +66,18 @@ spec: created for.
properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object + x-kubernetes-map-type: atomic gateway: description: Gateway is the network gateway of the network the address is from. type: string poolRef: description: PoolRef is a reference to the pool that this IPAddress was created from. properties: apiGroup: - description: APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in - the core API group. For any other third-party types, APIGroup - is required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of resource being referenced @@ -83,17 +99,116 @@ spec: description: Name is the name of resource being referenced type: string required: - - apiGroup - kind - name type: object + x-kubernetes-map-type: atomic + prefix: + description: Prefix is the prefix of the address. + type: integer + required: + - address + - claimRef + - poolRef + - prefix + type: object + type: object + served: true + storage: false + subresources: {} + - additionalPrinterColumns: + - description: Address + jsonPath: .spec.address + name: Address + type: string + - description: Name of the pool the address is from + jsonPath: .spec.poolRef.name + name: Pool Name + type: string + - description: Kind of the pool the address is from + jsonPath: .spec.poolRef.kind + name: Pool Kind + type: string + - description: Time duration since creation of IPAddress + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: IPAddress is the Schema for the ipaddress API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IPAddressSpec is the desired state of an IPAddress. + properties: + address: + description: Address is the IP address. + type: string + claimRef: + description: ClaimRef is a reference to the claim this IPAddress was + created for. + properties: + name: + default: "" + description: |- + Name of the referent.
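An IPAddress pulls the pieces together: the address and prefix, the claim it satisfies, and the pool it came from. Note that in the v1beta1 schema shown here, `gateway` is no longer required. All names are illustrative:

```yaml
apiVersion: ipam.cluster.x-k8s.io/v1beta1
kind: IPAddress
metadata:
  name: my-machine-ip-address
spec:
  address: 10.0.0.15
  prefix: 24               # i.e. 10.0.0.15/24
  gateway: 10.0.0.1        # optional in v1beta1
  claimRef:
    name: my-machine-ip
  poolRef:
    apiGroup: ipam.cluster.x-k8s.io
    kind: InClusterIPPool  # illustrative pool kind
    name: my-pool
```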
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. + type: string + type: object + x-kubernetes-map-type: atomic + gateway: + description: Gateway is the network gateway of the network the address + is from. + type: string + poolRef: + description: PoolRef is a reference to the pool that this IPAddress + was created from. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic prefix: description: Prefix is the prefix of the address. type: integer required: - address - claimRef - - gateway - poolRef - prefix type: object diff --git a/config/crd/bases/runtime.cluster.x-k8s.io_extensionconfigs.yaml b/config/crd/bases/runtime.cluster.x-k8s.io_extensionconfigs.yaml index 99d7e4527a9d..0a2075257490 100644 --- a/config/crd/bases/runtime.cluster.x-k8s.io_extensionconfigs.yaml +++ b/config/crd/bases/runtime.cluster.x-k8s.io_extensionconfigs.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.15.0 name: extensionconfigs.runtime.cluster.x-k8s.io spec: group: runtime.cluster.x-k8s.io @@ -30,14 +29,19 @@ spec: description: ExtensionConfig is the Schema for the ExtensionConfig API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -54,10 +58,12 @@ spec: format: byte type: string service: - description: "Service is a reference to the Kubernetes service - for the Extension server. Note: Exactly one of `url` or `service` - must be specified. \n If the Extension server is running within - a cluster, then you should use `service`." + description: |- + Service is a reference to the Kubernetes service for the Extension server. + Note: Exactly one of `url` or `service` must be specified. + + + If the Extension server is running within a cluster, then you should use `service`. properties: name: description: Name is the name of the service. @@ -66,14 +72,15 @@ spec: description: Namespace is the namespace of the service. type: string path: - description: Path is an optional URL path and if present may - be any string permissible in a URL. If a path is set it - will be used as prefix to the hook-specific path. + description: |- + Path is an optional URL path and if present may be any string permissible in + a URL. If a path is set it will be used as prefix to the hook-specific path. type: string port: - description: Port is the port on the service that's hosting - the Extension server. Defaults to 443. Port should be a - valid port number (1-65535, inclusive). + description: |- + Port is the port on the service that's hosting the Extension server. + Defaults to 443. + Port should be a valid port number (1-65535, inclusive). format: int32 type: integer required: @@ -81,71 +88,84 @@ spec: - namespace type: object url: - description: "URL gives the location of the Extension server, - in standard URL form (`scheme://host:port/path`). Note: Exactly - one of `url` or `service` must be specified. \n The scheme must - be \"https\". \n The `host` should not refer to a service running - in the cluster; use the `service` field instead. \n A path is - optional, and if present may be any string permissible in a - URL. If a path is set it will be used as prefix to the hook-specific - path. \n Attempting to use a user or basic auth e.g. \"user:password@\" - is not allowed. Fragments (\"#...\") and query parameters (\"?...\") - are not allowed either." + description: |- + URL gives the location of the Extension server, in standard URL form + (`scheme://host:port/path`). + Note: Exactly one of `url` or `service` must be specified. + + + The scheme must be "https". + + + The `host` should not refer to a service running in the cluster; use + the `service` field instead. + + + A path is optional, and if present may be any string permissible in + a URL. If a path is set it will be used as prefix to the hook-specific path. + + + Attempting to use a user or basic auth e.g. "user:password@" is not + allowed. Fragments ("#...") and query parameters ("?...") are not + allowed either. type: string type: object namespaceSelector: - description: NamespaceSelector decides whether to call the hook for - an object based on whether the namespace for that object matches - the selector. Defaults to the empty LabelSelector, which matches - all objects. + description: |- + NamespaceSelector decides whether to call the hook for an object based + on whether the namespace for that object matches the selector. + Defaults to the empty LabelSelector, which matches all objects. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic settings: additionalProperties: type: string - description: 'Settings defines key value pairs to be passed to all - calls to all supported RuntimeExtensions. Note: Settings can be - overridden on the ClusterClass.' + description: |- + Settings defines key value pairs to be passed to all calls + to all supported RuntimeExtensions. + Note: Settings can be overridden on the ClusterClass. type: object required: - clientConfig @@ -160,37 +180,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. 
+ The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - lastTransitionTime @@ -206,9 +226,9 @@ spec: for a particular runtime hook registered by an Extension server. properties: failurePolicy: - description: FailurePolicy defines how failures in calls to - the ExtensionHandler should be handled by a client. Defaults - to Fail if not set. + description: |- + FailurePolicy defines how failures in calls to the ExtensionHandler should be handled by a client. + Defaults to Fail if not set. type: string name: description: Name is the unique name of the ExtensionHandler. @@ -229,9 +249,9 @@ spec: - hook type: object timeoutSeconds: - description: TimeoutSeconds defines the timeout duration for - client calls to the ExtensionHandler. Defaults to 10 is not - set. + description: |- + TimeoutSeconds defines the timeout duration for client calls to the ExtensionHandler. + Defaults to 10 if not set. format: int32 type: integer required: diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 0ea28e7bae1c..2893a4e8b95c 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -16,31 +16,31 @@ resources: - bases/ipam.cluster.x-k8s.io_ipaddressclaims.yaml # +kubebuilder:scaffold:crdkustomizeresource -patchesStrategicMerge: +patches: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
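The ExtensionConfig schema that just ended is easier to digest with a concrete manifest. A minimal sketch, assuming the runtime.cluster.x-k8s.io/v1alpha1 serving version (not shown in this hunk) and hypothetical names throughout; exactly one of `url` or `service` may be set, and `service` is the right choice for an extension server running in-cluster:

```yaml
apiVersion: runtime.cluster.x-k8s.io/v1alpha1   # assumed serving version
kind: ExtensionConfig
metadata:
  name: example-runtime-extension
spec:
  clientConfig:
    service:                        # mutually exclusive with `url`
      name: example-extension-server
      namespace: example-system
      port: 443                     # defaults to 443 when omitted
      path: /hooks                  # optional prefix for hook-specific paths
  namespaceSelector:                # an empty selector would match all namespaces
    matchExpressions:
    - key: kubernetes.io/metadata.name
      operator: In
      values:
      - team-a
  settings:                         # free-form key/value pairs passed on every call
    logLevel: "4"
```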
# patches here are for enabling the conversion webhook for each CRD -- patches/webhook_in_clusterclasses.yaml -- patches/webhook_in_clusters.yaml -- patches/webhook_in_machinepools.yaml -- patches/webhook_in_machines.yaml -- patches/webhook_in_machinesets.yaml -- patches/webhook_in_machinedeployments.yaml -- patches/webhook_in_machinehealthchecks.yaml -- patches/webhook_in_clusterresourcesets.yaml -- patches/webhook_in_clusterresourcesetbindings.yaml +- path: patches/webhook_in_clusterclasses.yaml +- path: patches/webhook_in_clusters.yaml +- path: patches/webhook_in_machinepools.yaml +- path: patches/webhook_in_machines.yaml +- path: patches/webhook_in_machinesets.yaml +- path: patches/webhook_in_machinedeployments.yaml +- path: patches/webhook_in_machinehealthchecks.yaml +- path: patches/webhook_in_clusterresourcesets.yaml +- path: patches/webhook_in_clusterresourcesetbindings.yaml # +kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. # patches here are for enabling the CA injection for each CRD -- patches/cainjection_in_clusterclasses.yaml -- patches/cainjection_in_clusters.yaml -- patches/cainjection_in_machinepools.yaml -- patches/cainjection_in_machines.yaml -- patches/cainjection_in_machinesets.yaml -- patches/cainjection_in_machinedeployments.yaml -- patches/cainjection_in_machinehealthchecks.yaml -- patches/cainjection_in_clusterresourcesets.yaml -- patches/cainjection_in_clusterresourcesetbindings.yaml +- path: patches/cainjection_in_clusterclasses.yaml +- path: patches/cainjection_in_clusters.yaml +- path: patches/cainjection_in_machinepools.yaml +- path: patches/cainjection_in_machines.yaml +- path: patches/cainjection_in_machinesets.yaml +- path: patches/cainjection_in_machinedeployments.yaml +- path: patches/cainjection_in_machinehealthchecks.yaml +- path: patches/cainjection_in_clusterresourcesets.yaml +- path: patches/cainjection_in_clusterresourcesetbindings.yaml # +kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. 
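The kustomization edits above track the kustomize v5 deprecations: the removed `patchesStrategicMerge` list becomes the `patches` field, whose entries name a `path` (or an inline `patch`) and may carry an explicit `target` selector. A minimal sketch of both entry shapes, reusing patch files from the list above:

```yaml
# kustomize v5 style patch entries (sketch)
patches:
- path: patches/webhook_in_clusters.yaml     # plain strategic-merge patch file
- path: patches/cainjection_in_clusters.yaml
  target:                                    # optional explicit target selector
    group: apiextensions.k8s.io
    version: v1
    kind: CustomResourceDefinition
    name: clusters.cluster.x-k8s.io
```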
diff --git a/config/crd/kustomizeconfig.yaml b/config/crd/kustomizeconfig.yaml index 8e2d8d6b1774..2714ca48a89b 100644 --- a/config/crd/kustomizeconfig.yaml +++ b/config/crd/kustomizeconfig.yaml @@ -13,5 +13,3 @@ namespace: path: spec/conversion/webhook/clientConfig/service/namespace create: false -varReference: -- path: metadata/annotations diff --git a/config/crd/patches/cainjection_in_clusterclasses.yaml b/config/crd/patches/cainjection_in_clusterclasses.yaml index 4b3d83113df4..59f29099f38e 100644 --- a/config/crd/patches/cainjection_in_clusterclasses.yaml +++ b/config/crd/patches/cainjection_in_clusterclasses.yaml @@ -4,5 +4,5 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME name: clusterclasses.cluster.x-k8s.io diff --git a/config/crd/patches/cainjection_in_clusterresourcesetbindings.yaml b/config/crd/patches/cainjection_in_clusterresourcesetbindings.yaml index 7b54b56f62ae..a6df0122dc08 100644 --- a/config/crd/patches/cainjection_in_clusterresourcesetbindings.yaml +++ b/config/crd/patches/cainjection_in_clusterresourcesetbindings.yaml @@ -4,5 +4,5 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME name: clusterresourcesetbindings.addons.cluster.x-k8s.io diff --git a/config/crd/patches/cainjection_in_clusterresourcesets.yaml b/config/crd/patches/cainjection_in_clusterresourcesets.yaml index e3c0e73a81bd..4d193c0c4f03 100644 --- a/config/crd/patches/cainjection_in_clusterresourcesets.yaml +++ b/config/crd/patches/cainjection_in_clusterresourcesets.yaml @@ -4,5 +4,5 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME name: clusterresourcesets.addons.cluster.x-k8s.io diff --git a/config/crd/patches/cainjection_in_clusters.yaml b/config/crd/patches/cainjection_in_clusters.yaml index 2e891790e1db..871b0adf5d70 100644 --- a/config/crd/patches/cainjection_in_clusters.yaml +++ b/config/crd/patches/cainjection_in_clusters.yaml @@ -4,5 +4,5 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME name: clusters.cluster.x-k8s.io diff --git a/config/crd/patches/cainjection_in_machinedeployments.yaml b/config/crd/patches/cainjection_in_machinedeployments.yaml index 15e6de214210..6fe5699f8033 100644 --- a/config/crd/patches/cainjection_in_machinedeployments.yaml +++ b/config/crd/patches/cainjection_in_machinedeployments.yaml @@ -4,5 +4,5 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME name: machinedeployments.cluster.x-k8s.io diff --git a/config/crd/patches/cainjection_in_machinehealthchecks.yaml b/config/crd/patches/cainjection_in_machinehealthchecks.yaml index eb79d32e32af..6b45c097adc9 100644 --- 
a/config/crd/patches/cainjection_in_machinehealthchecks.yaml +++ b/config/crd/patches/cainjection_in_machinehealthchecks.yaml @@ -4,5 +4,5 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME name: machinehealthchecks.cluster.x-k8s.io diff --git a/config/crd/patches/cainjection_in_machinepools.yaml b/config/crd/patches/cainjection_in_machinepools.yaml index b5329a85f8ba..aa288820ae21 100644 --- a/config/crd/patches/cainjection_in_machinepools.yaml +++ b/config/crd/patches/cainjection_in_machinepools.yaml @@ -4,5 +4,5 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME name: machinepools.cluster.x-k8s.io diff --git a/config/crd/patches/cainjection_in_machines.yaml b/config/crd/patches/cainjection_in_machines.yaml index 0877175c539e..84d59e66adab 100644 --- a/config/crd/patches/cainjection_in_machines.yaml +++ b/config/crd/patches/cainjection_in_machines.yaml @@ -4,5 +4,5 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME name: machines.cluster.x-k8s.io diff --git a/config/crd/patches/cainjection_in_machinesets.yaml b/config/crd/patches/cainjection_in_machinesets.yaml index 0996162d64fe..efb0eb98e8ef 100644 --- a/config/crd/patches/cainjection_in_machinesets.yaml +++ b/config/crd/patches/cainjection_in_machinesets.yaml @@ -4,5 +4,5 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME name: machinesets.cluster.x-k8s.io diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index 18aeca81afe9..8f7ceebb22c8 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -2,59 +2,130 @@ namespace: capi-system namePrefix: capi- -commonLabels: - cluster.x-k8s.io/provider: "cluster-api" +labels: +- includeSelectors: true + pairs: + cluster.x-k8s.io/provider: cluster-api resources: - namespace.yaml - -bases: - ../crd - ../rbac - ../manager - ../webhook - ../certmanager -patchesStrategicMerge: +patches: # Provide customizable hook for make targets. -- manager_image_patch.yaml -- manager_pull_policy.yaml +- path: manager_image_patch.yaml +- path: manager_pull_policy.yaml # Enable webhook. -- manager_webhook_patch.yaml +- path: manager_webhook_patch.yaml # Inject certificate in the webhook definition. 
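A note on the `$(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)` edits in the patch files above and below: with kustomize `vars` removed, the annotation now carries the literal `CERTIFICATE_NAMESPACE/CERTIFICATE_NAME` string, and the `replacements` block added to config/default/kustomization.yaml further down rewrites each `/`-delimited half from the serving-cert Certificate's namespace and name. With this overlay's capi- name prefix and capi-system namespace, the rendered annotation would be roughly:

```yaml
# committed placeholder (before `kustomize build`):
metadata:
  annotations:
    cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME
---
# rendered output (sketch; actual values depend on the overlay):
metadata:
  annotations:
    cert-manager.io/inject-ca-from: capi-system/capi-serving-cert
```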
-- webhookcainjection_patch.yaml +- path: webhookcainjection_patch.yaml # Ease the process of providing extra RBAC to the Cluster API manager for # non SIG Cluster Lifecycle-sponsored provider subprojects by using an # aggregated role -- manager_role_aggregation_patch.yaml +- path: manager_role_aggregation_patch.yaml -vars: - - name: CERTIFICATE_NAMESPACE # namespace of the certificate CR - objref: - kind: Certificate +replacements: +- source: + fieldPath: .metadata.namespace + group: cert-manager.io + kind: Certificate + name: serving-cert + version: v1 + targets: + - fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + create: true + delimiter: / + select: + kind: ValidatingWebhookConfiguration + - fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + create: true + delimiter: / + select: + kind: MutatingWebhookConfiguration + - fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + create: true + delimiter: / + select: + kind: CustomResourceDefinition + reject: + - name: ipaddressclaims.ipam.cluster.x-k8s.io + - name: ipaddresses.ipam.cluster.x-k8s.io + - name: extensionconfigs.runtime.cluster.x-k8s.io +- source: + fieldPath: .metadata.name + group: cert-manager.io + kind: Certificate + name: serving-cert + version: v1 + targets: + - fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + create: true + delimiter: / + index: 1 + select: + kind: ValidatingWebhookConfiguration + - fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + create: true + delimiter: / + index: 1 + select: + kind: MutatingWebhookConfiguration + - fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + create: true + delimiter: / + index: 1 + select: + kind: CustomResourceDefinition + reject: + - name: ipaddressclaims.ipam.cluster.x-k8s.io + - name: ipaddresses.ipam.cluster.x-k8s.io + - name: extensionconfigs.runtime.cluster.x-k8s.io +- source: + fieldPath: .metadata.name + kind: Service + name: webhook-service + version: v1 + targets: + - fieldPaths: + - .spec.dnsNames.0 + - .spec.dnsNames.1 + options: + create: true + delimiter: . + select: group: cert-manager.io - version: v1 - name: serving-cert # this name should match the one in certificate.yaml - fieldref: - fieldpath: metadata.namespace - - name: CERTIFICATE_NAME - objref: kind: Certificate - group: cert-manager.io version: v1 - name: serving-cert # this name should match the one in certificate.yaml - - name: SERVICE_NAMESPACE # namespace of the service - objref: - kind: Service - version: v1 - name: webhook-service - fieldref: - fieldpath: metadata.namespace - - name: SERVICE_NAME - objref: - kind: Service +- source: + fieldPath: .metadata.namespace + kind: Service + name: webhook-service + version: v1 + targets: + - fieldPaths: + - .spec.dnsNames.0 + - .spec.dnsNames.1 + options: + create: true + delimiter: . 
+ index: 1 + select: + group: cert-manager.io + kind: Certificate version: v1 - name: webhook-service - -configurations: - - kustomizeconfig.yaml diff --git a/config/default/kustomizeconfig.yaml b/config/default/kustomizeconfig.yaml deleted file mode 100644 index eb191e64d056..000000000000 --- a/config/default/kustomizeconfig.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# This configuration is for teaching kustomize how to update name ref and var substitution -varReference: -- kind: Deployment - path: spec/template/spec/volumes/secret/secretName diff --git a/config/default/manager_webhook_patch.yaml b/config/default/manager_webhook_patch.yaml index bccef6d70db8..aeba3f1413f8 100644 --- a/config/default/manager_webhook_patch.yaml +++ b/config/default/manager_webhook_patch.yaml @@ -19,4 +19,4 @@ spec: volumes: - name: cert secret: - secretName: $(SERVICE_NAME)-cert + secretName: capi-webhook-service-cert diff --git a/config/default/webhookcainjection_patch.yaml b/config/default/webhookcainjection_patch.yaml index 362c633431d4..b25eeace0b70 100644 --- a/config/default/webhookcainjection_patch.yaml +++ b/config/default/webhookcainjection_patch.yaml @@ -1,16 +1,16 @@ # This patch add annotation to admission webhook config and -# the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize. +# the variables CERTIFICATE_NAMESPACE and CERTIFICATE_NAME will be substituted by kustomize. # uncomment the following lines to enable mutating webhook apiVersion: admissionregistration.k8s.io/v1 kind: MutatingWebhookConfiguration metadata: name: mutating-webhook-configuration annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME --- apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration metadata: name: validating-webhook-configuration annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 5ac9b39670e8..5f537b5cbb61 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -21,8 +21,10 @@ spec: - /manager args: - "--leader-elect" - - "--metrics-bind-addr=localhost:8080" - - "--feature-gates=MachinePool=${EXP_MACHINE_POOL:=false},ClusterResourceSet=${EXP_CLUSTER_RESOURCE_SET:=false},ClusterTopology=${CLUSTER_TOPOLOGY:=false},RuntimeSDK=${EXP_RUNTIME_SDK:=false}" + - "--diagnostics-address=${CAPI_DIAGNOSTICS_ADDRESS:=:8443}" + - "--insecure-diagnostics=${CAPI_INSECURE_DIAGNOSTICS:=false}" + - "--use-deprecated-infra-machine-naming=${CAPI_USE_DEPRECATED_INFRA_MACHINE_NAMING:=false}" + - "--feature-gates=MachinePool=${EXP_MACHINE_POOL:=true},ClusterResourceSet=${EXP_CLUSTER_RESOURCE_SET:=true},ClusterTopology=${CLUSTER_TOPOLOGY:=false},RuntimeSDK=${EXP_RUNTIME_SDK:=false},MachineSetPreflightChecks=${EXP_MACHINE_SET_PREFLIGHT_CHECKS:=false}" image: controller:latest name: manager env: @@ -42,6 +44,9 @@ spec: - containerPort: 9440 name: healthz protocol: TCP + - containerPort: 8443 + name: metrics + protocol: TCP readinessProbe: httpGet: path: /readyz @@ -58,6 +63,7 @@ spec: privileged: false runAsUser: 65532 runAsGroup: 65532 + terminationMessagePolicy: FallbackToLogsOnError terminationGracePeriodSeconds: 10 serviceAccountName: manager tolerations: diff --git a/config/metrics/crd-clusterrole.yaml b/config/metrics/crd-clusterrole.yaml new file mode 
100644 index 000000000000..bcd5bde16ba3 --- /dev/null +++ b/config/metrics/crd-clusterrole.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kube-state-metrics-custom-resource-capi + labels: + kube-state-metrics/aggregate-to-manager: "true" +rules: +- apiGroups: + - cluster.x-k8s.io + resources: + - clusterclasses + - clusters + - machinedeployments + - machinepools + - machinesets + - machines + - machinehealthchecks + verbs: + - get + - list + - watch +- apiGroups: + - controlplane.cluster.x-k8s.io + resources: + - kubeadmcontrolplanes + verbs: + - get + - list + - watch +- apiGroups: + - bootstrap.cluster.x-k8s.io + resources: + - kubeadmconfigs + verbs: + - get + - list + - watch diff --git a/hack/observability/kube-state-metrics/crd-config.yaml b/config/metrics/crd-metrics-config.yaml similarity index 68% rename from hack/observability/kube-state-metrics/crd-config.yaml rename to config/metrics/crd-metrics-config.yaml index 230fba619e71..718af5a9bd9f 100644 --- a/hack/observability/kube-state-metrics/crd-config.yaml +++ b/config/metrics/crd-metrics-config.yaml @@ -2,6 +2,101 @@ kind: CustomResourceStateMetrics spec: resources: + - groupVersionKind: + group: cluster.x-k8s.io + kind: ClusterClass + version: v1beta1 + labelsFromPath: + name: + - metadata + - name + namespace: + - metadata + - namespace + uid: + - metadata + - uid + metricNamePrefix: capi_clusterclass + metrics: + - name: info + help: Information about a clusterclass. + each: + info: + # TODO: added metadata.name even it's already defined above as the metric doesn't work with empty labelsFromPath. + labelsFromPath: + name: + - metadata + - name + type: Info + - name: created + help: Unix creation timestamp. + each: + gauge: + path: + - metadata + - creationTimestamp + type: Gauge + - name: annotation_paused + help: Whether the clusterclass is paused and any of its resources will not be processed by the controllers. + each: + info: + path: + - metadata + - annotations + - cluster.x-k8s.io/paused + labelsFromPath: + paused_value: [] + type: Info + - name: status_condition + help: The condition of a clusterclass. + each: + stateSet: + labelName: status + labelsFromPath: + type: + - type + list: + - 'True' + - 'False' + - Unknown + path: + - status + - conditions + valueFrom: + - status + type: StateSet + - name: status_condition_last_transition_time + help: The condition last transition time of a clusterclass. + each: + gauge: + labelsFromPath: + type: + - type + status: + - status + path: + - status + - conditions + valueFrom: + - lastTransitionTime + type: Gauge + - name: owner + help: Owner references. + each: + info: + labelsFromPath: + owner_is_controller: + - controller + owner_kind: + - kind + owner_name: + - name + owner_uid: + - uid + path: + - metadata + - ownerReferences + type: Info - groupVersionKind: group: cluster.x-k8s.io kind: Cluster @@ -118,6 +213,21 @@ spec: valueFrom: - status type: StateSet + - name: status_condition_last_transition_time + help: The condition last transition time of a cluster. + each: + gauge: + labelsFromPath: + type: + - type + status: + - status + path: + - status + - conditions + valueFrom: + - lastTransitionTime + type: Gauge - groupVersionKind: group: controlplane.cluster.x-k8s.io kind: KubeadmControlPlane @@ -239,6 +349,120 @@ spec: valueFrom: - status type: StateSet + - name: status_condition_last_transition_time + help: The condition last transition time of a kubeadmcontrolplane. 
+ each: + gauge: + labelsFromPath: + type: + - type + status: + - status + path: + - status + - conditions + valueFrom: + - lastTransitionTime + type: Gauge + - name: owner + help: Owner references. + each: + info: + labelsFromPath: + owner_is_controller: + - controller + owner_kind: + - kind + owner_name: + - name + owner_uid: + - uid + path: + - metadata + - ownerReferences + type: Info + - groupVersionKind: + group: bootstrap.cluster.x-k8s.io + kind: KubeadmConfig + version: v1beta1 + labelsFromPath: + cluster_name: + - metadata + - labels + - cluster.x-k8s.io/cluster-name + name: + - metadata + - name + namespace: + - metadata + - namespace + uid: + - metadata + - uid + metricNamePrefix: capi_kubeadmconfig + metrics: + - name: info + help: Information about a kubeadmconfig. + each: + info: + # TODO: added metadata.name even it's already defined above as the metric doesn't work with empty labelsFromPath. + labelsFromPath: + name: + - metadata + - name + type: Info + - name: created + help: Unix creation timestamp. + each: + gauge: + path: + - metadata + - creationTimestamp + type: Gauge + - name: annotation_paused + help: Whether the kubeadmconfig is paused and any of its resources will not be processed by the controllers. + each: + info: + path: + - metadata + - annotations + - cluster.x-k8s.io/paused + labelsFromPath: + paused_value: [] + type: Info + - name: status_condition + help: The condition of a kubeadmconfig. + each: + stateSet: + labelName: status + labelsFromPath: + type: + - type + list: + - 'True' + - 'False' + - Unknown + path: + - status + - conditions + valueFrom: + - status + type: StateSet + - name: status_condition_last_transition_time + help: The condition last transition time of a kubeadmconfig. + each: + gauge: + labelsFromPath: + type: + - type + status: + - status + path: + - status + - conditions + valueFrom: + - lastTransitionTime + type: Gauge - name: owner help: Owner references. each: @@ -309,6 +533,28 @@ spec: version: - spec - version + bootstrap_reference_kind: + - spec + - bootstrap + - configRef + - kind + bootstrap_reference_name: + - spec + - bootstrap + - configRef + - name + infrastructure_reference_kind: + - spec + - infrastructureRef + - kind + infrastructure_reference_name: + - spec + - infrastructureRef + - name + control_plane_name: + - metadata + - labels + - cluster.x-k8s.io/control-plane-name type: Info - name: addresses help: Address information about a machine. @@ -323,6 +569,15 @@ spec: address: - address type: Info + - name: status_certificatesexpirydate + help: Information about certificate expiration date of a control plane node. + each: + gauge: + nilIsZero: true + path: + - status + - certificatesExpiryDate + type: Gauge - name: status_noderef help: Information about the node reference of a machine. each: @@ -392,6 +647,21 @@ spec: valueFrom: - status type: StateSet + - name: status_condition_last_transition_time + help: The condition last transition time of a machine. + each: + gauge: + labelsFromPath: + type: + - type + status: + - status + path: + - status + - conditions + valueFrom: + - lastTransitionTime + type: Gauge - name: owner help: Owner references. each: @@ -428,6 +698,43 @@ spec: - uid metricNamePrefix: capi_machinedeployment metrics: + - name: info + help: Information about a machinedeployment. 
+ each: + info: + labelsFromPath: + version: + - spec + - template + - spec + - version + bootstrap_reference_kind: + - spec + - template + - spec + - bootstrap + - configRef + - kind + bootstrap_reference_name: + - spec + - template + - spec + - bootstrap + - configRef + - name + infrastructure_reference_kind: + - spec + - template + - spec + - infrastructureRef + - kind + infrastructure_reference_name: + - spec + - template + - spec + - infrastructureRef + - name + type: Info - name: spec_paused help: Whether the machinedeployment is paused and any of its resources will not be processed by the controllers. each: @@ -562,6 +869,21 @@ spec: valueFrom: - status type: StateSet + - name: status_condition_last_transition_time + help: The condition last transition time of a machinedeployment. + each: + gauge: + labelsFromPath: + type: + - type + status: + - status + path: + - status + - conditions + valueFrom: + - lastTransitionTime + type: Gauge - name: owner help: Owner references. each: @@ -598,6 +920,16 @@ spec: - uid metricNamePrefix: capi_machinehealthcheck metrics: + - name: info + help: Information about a machinehealthcheck. + each: + info: + # TODO: added metadata.name even it's already defined above as the metric doesn't work with empty labelsFromPath. + labelsFromPath: + name: + - metadata + - name + type: Info - name: status_current_healthy help: Current number of healthy machines. each: @@ -659,6 +991,21 @@ spec: valueFrom: - status type: StateSet + - name: status_condition_last_transition_time + help: The condition last transition time of a machinehealthcheck. + each: + gauge: + labelsFromPath: + type: + - type + status: + - status + path: + - status + - conditions + valueFrom: + - lastTransitionTime + type: Gauge - name: owner help: Owner references. each: @@ -695,6 +1042,43 @@ spec: - uid metricNamePrefix: capi_machineset metrics: + - name: info + help: Information about a machineset. + each: + info: + labelsFromPath: + version: + - spec + - template + - spec + - version + bootstrap_reference_kind: + - spec + - template + - spec + - bootstrap + - configRef + - kind + bootstrap_reference_name: + - spec + - template + - spec + - bootstrap + - configRef + - name + infrastructure_reference_kind: + - spec + - template + - spec + - infrastructureRef + - kind + infrastructure_reference_name: + - spec + - template + - spec + - infrastructureRef + - name + type: Info - name: spec_replicas help: The number of desired machines for a machineset. each: @@ -704,7 +1088,7 @@ spec: - replicas nilIsZero: true type: Gauge - - name: status_available_replicas + - name: status_replicas_available help: The number of available replicas per machineset. each: gauge: @@ -713,7 +1097,7 @@ spec: - availableReplicas nilIsZero: true type: Gauge - - name: status_fully_labeled_replicas + - name: status_replicas_fully_labeled help: The number of fully labeled replicas per machineset. each: gauge: @@ -721,7 +1105,7 @@ spec: - status - fullyLabeledReplicas type: Gauge - - name: status_ready_replicas + - name: status_replicas_ready help: The number of ready replicas per machineset. each: gauge: @@ -776,6 +1160,21 @@ spec: valueFrom: - status type: StateSet + - name: status_condition_last_transition_time + help: The condition last transition time of a machineset. + each: + gauge: + labelsFromPath: + type: + - type + status: + - status + path: + - status + - conditions + valueFrom: + - lastTransitionTime + type: Gauge - name: owner help: Owner references. 
each: @@ -949,6 +1348,21 @@ spec: valueFrom: - status type: StateSet + - name: status_condition_last_transition_time + help: The condition last transition time of a machinepool. + each: + gauge: + labelsFromPath: + type: + - type + status: + - status + path: + - status + - conditions + valueFrom: + - lastTransitionTime + type: Gauge - name: owner help: Owner references. each: diff --git a/config/metrics/kustomization.yaml b/config/metrics/kustomization.yaml new file mode 100644 index 000000000000..3955c8d0d945 --- /dev/null +++ b/config/metrics/kustomization.yaml @@ -0,0 +1,13 @@ +resources: + - ./crd-clusterrole.yaml + +namespace: observability + +configMapGenerator: +- name: kube-state-metrics-crd-config-capi + files: + - capi.yaml=crd-metrics-config.yaml + options: + disableNameSuffixHash: true + labels: + kube-state-metrics/custom-resource: "true" diff --git a/hack/observability/kube-state-metrics/metrics/README.md b/config/metrics/templates/README.md similarity index 74% rename from hack/observability/kube-state-metrics/metrics/README.md rename to config/metrics/templates/README.md index 753ba822f79a..c52a76817f1f 100644 --- a/hack/observability/kube-state-metrics/metrics/README.md +++ b/config/metrics/templates/README.md @@ -4,4 +4,4 @@ The make target `generate-metrics-config` is used to generate a single file which contains the Cluster API specific custom resource configuration for kube-state-metrics. -To regenerate the file `../crd-config.yaml`, execute the `make generate-metrics-config` command. +To regenerate the file `../crd-metrics-config.yaml`, execute the `make generate-metrics-config` command. diff --git a/hack/observability/kube-state-metrics/metrics/cluster.yaml b/config/metrics/templates/cluster.yaml similarity index 100% rename from hack/observability/kube-state-metrics/metrics/cluster.yaml rename to config/metrics/templates/cluster.yaml diff --git a/config/metrics/templates/clusterclass.yaml b/config/metrics/templates/clusterclass.yaml new file mode 100644 index 000000000000..21b464d45e06 --- /dev/null +++ b/config/metrics/templates/clusterclass.yaml @@ -0,0 +1,26 @@ + - groupVersionKind: + group: cluster.x-k8s.io + kind: ClusterClass + version: v1beta1 + labelsFromPath: + name: + - metadata + - name + namespace: + - metadata + - namespace + uid: + - metadata + - uid + metricNamePrefix: capi_clusterclass + metrics: + - name: info + help: Information about a clusterclass. + each: + info: + # TODO: added metadata.name even it's already defined above as the metric doesn't work with empty labelsFromPath. + labelsFromPath: + name: + - metadata + - name + type: Info diff --git a/hack/observability/kube-state-metrics/metrics/common_metrics.yaml b/config/metrics/templates/common_metrics.yaml similarity index 70% rename from hack/observability/kube-state-metrics/metrics/common_metrics.yaml rename to config/metrics/templates/common_metrics.yaml index 49e55e1d31a2..073631b9c88b 100644 --- a/hack/observability/kube-state-metrics/metrics/common_metrics.yaml +++ b/config/metrics/templates/common_metrics.yaml @@ -35,3 +35,18 @@ valueFrom: - status type: StateSet + - name: status_condition_last_transition_time + help: The condition last transition time of a ${RESOURCE}. 
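The `${RESOURCE}` placeholder in the common_metrics.yaml template above is filled in per resource when `make generate-metrics-config` stitches the templates into crd-metrics-config.yaml (the gauge body of this metric continues in the template just below). Rendered for the Cluster resource, the fragment matches the generated file earlier in this diff; a sketch of the expanded form:

```yaml
- name: status_condition_last_transition_time
  help: The condition last transition time of a cluster.
  each:
    gauge:
      labelsFromPath:
        type:
        - type
        status:
        - status
      path:
      - status
      - conditions
      valueFrom:
      - lastTransitionTime
    type: Gauge
```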
+ each: + gauge: + labelsFromPath: + type: + - type + status: + - status + path: + - status + - conditions + valueFrom: + - lastTransitionTime + type: Gauge diff --git a/hack/observability/kube-state-metrics/metrics/header.yaml b/config/metrics/templates/header.yaml similarity index 100% rename from hack/observability/kube-state-metrics/metrics/header.yaml rename to config/metrics/templates/header.yaml diff --git a/config/metrics/templates/kubeadmconfig.yaml b/config/metrics/templates/kubeadmconfig.yaml new file mode 100644 index 000000000000..ffb240172a60 --- /dev/null +++ b/config/metrics/templates/kubeadmconfig.yaml @@ -0,0 +1,30 @@ + - groupVersionKind: + group: bootstrap.cluster.x-k8s.io + kind: KubeadmConfig + version: v1beta1 + labelsFromPath: + cluster_name: + - metadata + - labels + - cluster.x-k8s.io/cluster-name + name: + - metadata + - name + namespace: + - metadata + - namespace + uid: + - metadata + - uid + metricNamePrefix: capi_kubeadmconfig + metrics: + - name: info + help: Information about a kubeadmconfig. + each: + info: + # TODO: added metadata.name even it's already defined above as the metric doesn't work with empty labelsFromPath. + labelsFromPath: + name: + - metadata + - name + type: Info diff --git a/hack/observability/kube-state-metrics/metrics/kubeadmcontrolplane.yaml b/config/metrics/templates/kubeadmcontrolplane.yaml similarity index 100% rename from hack/observability/kube-state-metrics/metrics/kubeadmcontrolplane.yaml rename to config/metrics/templates/kubeadmcontrolplane.yaml diff --git a/hack/observability/kube-state-metrics/metrics/machine.yaml b/config/metrics/templates/machine.yaml similarity index 71% rename from hack/observability/kube-state-metrics/metrics/machine.yaml rename to config/metrics/templates/machine.yaml index addf4595c8b0..b49b1d3b7c7e 100644 --- a/hack/observability/kube-state-metrics/metrics/machine.yaml +++ b/config/metrics/templates/machine.yaml @@ -51,6 +51,28 @@ version: - spec - version + bootstrap_reference_kind: + - spec + - bootstrap + - configRef + - kind + bootstrap_reference_name: + - spec + - bootstrap + - configRef + - name + infrastructure_reference_kind: + - spec + - infrastructureRef + - kind + infrastructure_reference_name: + - spec + - infrastructureRef + - name + control_plane_name: + - metadata + - labels + - cluster.x-k8s.io/control-plane-name type: Info - name: addresses help: Address information about a machine. @@ -65,6 +87,15 @@ address: - address type: Info + - name: status_certificatesexpirydate + help: Information about certificate expiration date of a control plane node. + each: + gauge: + nilIsZero: true + path: + - status + - certificatesExpiryDate + type: Gauge - name: status_noderef help: Information about the node reference of a machine. each: diff --git a/hack/observability/kube-state-metrics/metrics/machinedeployment.yaml b/config/metrics/templates/machinedeployment.yaml similarity index 77% rename from hack/observability/kube-state-metrics/metrics/machinedeployment.yaml rename to config/metrics/templates/machinedeployment.yaml index 9f94745c8c0a..1ece3552149a 100644 --- a/hack/observability/kube-state-metrics/metrics/machinedeployment.yaml +++ b/config/metrics/templates/machinedeployment.yaml @@ -17,6 +17,43 @@ - uid metricNamePrefix: capi_machinedeployment metrics: + - name: info + help: Information about a machinedeployment. 
+ each: + info: + labelsFromPath: + version: + - spec + - template + - spec + - version + bootstrap_reference_kind: + - spec + - template + - spec + - bootstrap + - configRef + - kind + bootstrap_reference_name: + - spec + - template + - spec + - bootstrap + - configRef + - name + infrastructure_reference_kind: + - spec + - template + - spec + - infrastructureRef + - kind + infrastructure_reference_name: + - spec + - template + - spec + - infrastructureRef + - name + type: Info - name: spec_paused help: Whether the machinedeployment is paused and any of its resources will not be processed by the controllers. each: diff --git a/hack/observability/kube-state-metrics/metrics/machinehealthcheck.yaml b/config/metrics/templates/machinehealthcheck.yaml similarity index 75% rename from hack/observability/kube-state-metrics/metrics/machinehealthcheck.yaml rename to config/metrics/templates/machinehealthcheck.yaml index 9df009812fb2..a14a164be72b 100644 --- a/hack/observability/kube-state-metrics/metrics/machinehealthcheck.yaml +++ b/config/metrics/templates/machinehealthcheck.yaml @@ -17,6 +17,16 @@ - uid metricNamePrefix: capi_machinehealthcheck metrics: + - name: info + help: Information about a machinehealthcheck. + each: + info: + # TODO: added metadata.name even it's already defined above as the metric doesn't work with empty labelsFromPath. + labelsFromPath: + name: + - metadata + - name + type: Info - name: status_current_healthy help: Current number of healthy machines. each: diff --git a/hack/observability/kube-state-metrics/metrics/machinepool.yaml b/config/metrics/templates/machinepool.yaml similarity index 100% rename from hack/observability/kube-state-metrics/metrics/machinepool.yaml rename to config/metrics/templates/machinepool.yaml diff --git a/hack/observability/kube-state-metrics/metrics/machineset.yaml b/config/metrics/templates/machineset.yaml similarity index 57% rename from hack/observability/kube-state-metrics/metrics/machineset.yaml rename to config/metrics/templates/machineset.yaml index 42ef967aac53..e2bde1ecde6e 100644 --- a/hack/observability/kube-state-metrics/metrics/machineset.yaml +++ b/config/metrics/templates/machineset.yaml @@ -17,6 +17,43 @@ - uid metricNamePrefix: capi_machineset metrics: + - name: info + help: Information about a machineset. + each: + info: + labelsFromPath: + version: + - spec + - template + - spec + - version + bootstrap_reference_kind: + - spec + - template + - spec + - bootstrap + - configRef + - kind + bootstrap_reference_name: + - spec + - template + - spec + - bootstrap + - configRef + - name + infrastructure_reference_kind: + - spec + - template + - spec + - infrastructureRef + - kind + infrastructure_reference_name: + - spec + - template + - spec + - infrastructureRef + - name + type: Info - name: spec_replicas help: The number of desired machines for a machineset. each: @@ -26,7 +63,7 @@ - replicas nilIsZero: true type: Gauge - - name: status_available_replicas + - name: status_replicas_available help: The number of available replicas per machineset. each: gauge: @@ -35,7 +72,7 @@ - availableReplicas nilIsZero: true type: Gauge - - name: status_fully_labeled_replicas + - name: status_replicas_fully_labeled help: The number of fully labeled replicas per machineset. each: gauge: @@ -43,7 +80,7 @@ - status - fullyLabeledReplicas type: Gauge - - name: status_ready_replicas + - name: status_replicas_ready help: The number of ready replicas per machineset. 
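One behavioral note on the three metric renames just above (continued below): with the capi_machineset prefix, the exposed series names change as well, so dashboards and alerts querying the old names need updating. A sketch of the mapping (old name as key, new name as value):

```yaml
# MachineSet series renamed by this change (old -> new):
capi_machineset_status_available_replicas: capi_machineset_status_replicas_available
capi_machineset_status_fully_labeled_replicas: capi_machineset_status_replicas_fully_labeled
capi_machineset_status_ready_replicas: capi_machineset_status_replicas_ready
```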
each: gauge: diff --git a/hack/observability/kube-state-metrics/metrics/owner_metric.yaml b/config/metrics/templates/owner_metric.yaml similarity index 100% rename from hack/observability/kube-state-metrics/metrics/owner_metric.yaml rename to config/metrics/templates/owner_metric.yaml diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index ce89a490ae9b..25230de2ff76 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -2,7 +2,6 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - creationTimestamp: null name: manager-role rules: - apiGroups: @@ -28,7 +27,6 @@ rules: - apiGroups: - addons.cluster.x-k8s.io resources: - - clusterresourcesets/finalizers - clusterresourcesets/status verbs: - get @@ -42,6 +40,18 @@ rules: - get - list - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create - apiGroups: - bootstrap.cluster.x-k8s.io - controlplane.cluster.x-k8s.io @@ -74,8 +84,6 @@ rules: resources: - clusterclasses verbs: - - create - - delete - get - list - patch @@ -100,28 +108,12 @@ rules: - get - list - watch -- apiGroups: - - cluster.x-k8s.io - resources: - - clusters - - clusters/finalizers - - clusters/status - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - apiGroups: - cluster.x-k8s.io resources: - clusters - clusters/status verbs: - - create - - delete - get - list - patch @@ -143,18 +135,6 @@ rules: - cluster.x-k8s.io resources: - machinedeployments - - machinedeployments/finalizers - verbs: - - get - - list - - patch - - update - - watch -- apiGroups: - - cluster.x-k8s.io - resources: - - machinedeployments - - machinedeployments/finalizers - machinedeployments/status verbs: - create @@ -180,7 +160,6 @@ rules: - cluster.x-k8s.io resources: - machinehealthchecks - - machinehealthchecks/finalizers - machinehealthchecks/status verbs: - get @@ -192,8 +171,6 @@ rules: - cluster.x-k8s.io resources: - machinepools - - machinepools/finalizers - - machinepools/status verbs: - create - delete @@ -205,9 +182,8 @@ rules: - apiGroups: - cluster.x-k8s.io resources: - - machines - - machines/finalizers - - machines/status + - machinepools + - machinepools/status verbs: - create - delete @@ -222,9 +198,12 @@ rules: - machines - machines/status verbs: + - create - delete - get - list + - patch + - update - watch - apiGroups: - cluster.x-k8s.io @@ -233,15 +212,6 @@ rules: verbs: - get - list - - watch -- apiGroups: - - cluster.x-k8s.io - resources: - - machinesets - - machinesets/finalizers - verbs: - - get - - list - patch - update - watch @@ -249,7 +219,6 @@ rules: - cluster.x-k8s.io resources: - machinesets - - machinesets/finalizers - machinesets/status verbs: - create @@ -267,6 +236,7 @@ rules: - get - list - patch + - update - watch - apiGroups: - "" @@ -274,22 +244,6 @@ rules: - events verbs: - create - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - nodes - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - apiGroups: - "" resources: @@ -300,6 +254,7 @@ rules: - get - list - patch + - update - watch - apiGroups: - ipam.cluster.x-k8s.io diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml index e270e753ef73..5a95f66d6f82 100644 --- a/config/rbac/role_binding.yaml +++ b/config/rbac/role_binding.yaml @@ -1,7 +1,6 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - creationTimestamp: 
null name: manager-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io diff --git a/config/webhook/kustomizeconfig.yaml b/config/webhook/kustomizeconfig.yaml index 25e21e3c963f..6d782eb8e350 100644 --- a/config/webhook/kustomizeconfig.yaml +++ b/config/webhook/kustomizeconfig.yaml @@ -21,5 +21,3 @@ namespace: path: webhooks/clientConfig/service/namespace create: true -varReference: -- path: metadata/annotations diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index f38f2a1e673f..a5d7a028af79 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -2,7 +2,6 @@ apiVersion: admissionregistration.k8s.io/v1 kind: MutatingWebhookConfiguration metadata: - creationTimestamp: null name: mutating-webhook-configuration webhooks: - admissionReviewVersions: @@ -12,10 +11,10 @@ webhooks: service: name: webhook-service namespace: system - path: /mutate-cluster-x-k8s-io-v1beta1-machine + path: /mutate-cluster-x-k8s-io-v1beta1-cluster failurePolicy: Fail matchPolicy: Equivalent - name: default.machine.cluster.x-k8s.io + name: default.cluster.cluster.x-k8s.io rules: - apiGroups: - cluster.x-k8s.io @@ -25,7 +24,7 @@ webhooks: - CREATE - UPDATE resources: - - machines + - clusters sideEffects: None - admissionReviewVersions: - v1 @@ -34,10 +33,10 @@ webhooks: service: name: webhook-service namespace: system - path: /mutate-cluster-x-k8s-io-v1beta1-machinedeployment + path: /mutate-cluster-x-k8s-io-v1beta1-clusterclass failurePolicy: Fail matchPolicy: Equivalent - name: default.machinedeployment.cluster.x-k8s.io + name: default.clusterclass.cluster.x-k8s.io rules: - apiGroups: - cluster.x-k8s.io @@ -47,7 +46,7 @@ webhooks: - CREATE - UPDATE resources: - - machinedeployments + - clusterclasses sideEffects: None - admissionReviewVersions: - v1 @@ -56,10 +55,10 @@ webhooks: service: name: webhook-service namespace: system - path: /mutate-cluster-x-k8s-io-v1beta1-machinehealthcheck + path: /mutate-cluster-x-k8s-io-v1beta1-machine failurePolicy: Fail matchPolicy: Equivalent - name: default.machinehealthcheck.cluster.x-k8s.io + name: default.machine.cluster.x-k8s.io rules: - apiGroups: - cluster.x-k8s.io @@ -69,7 +68,7 @@ webhooks: - CREATE - UPDATE resources: - - machinehealthchecks + - machines sideEffects: None - admissionReviewVersions: - v1 @@ -78,10 +77,10 @@ webhooks: service: name: webhook-service namespace: system - path: /mutate-cluster-x-k8s-io-v1beta1-machineset + path: /mutate-cluster-x-k8s-io-v1beta1-machinedeployment failurePolicy: Fail matchPolicy: Equivalent - name: default.machineset.cluster.x-k8s.io + name: default.machinedeployment.cluster.x-k8s.io rules: - apiGroups: - cluster.x-k8s.io @@ -91,7 +90,7 @@ webhooks: - CREATE - UPDATE resources: - - machinesets + - machinedeployments sideEffects: None - admissionReviewVersions: - v1 @@ -100,10 +99,10 @@ webhooks: service: name: webhook-service namespace: system - path: /mutate-cluster-x-k8s-io-v1beta1-cluster + path: /mutate-cluster-x-k8s-io-v1beta1-machinehealthcheck failurePolicy: Fail matchPolicy: Equivalent - name: default.cluster.cluster.x-k8s.io + name: default.machinehealthcheck.cluster.x-k8s.io rules: - apiGroups: - cluster.x-k8s.io @@ -113,7 +112,7 @@ webhooks: - CREATE - UPDATE resources: - - clusters + - machinehealthchecks sideEffects: None - admissionReviewVersions: - v1 @@ -122,10 +121,10 @@ webhooks: service: name: webhook-service namespace: system - path: /mutate-cluster-x-k8s-io-v1beta1-clusterclass + path: /mutate-cluster-x-k8s-io-v1beta1-machineset failurePolicy: Fail 
matchPolicy: Equivalent - name: default.clusterclass.cluster.x-k8s.io + name: default.machineset.cluster.x-k8s.io rules: - apiGroups: - cluster.x-k8s.io @@ -135,7 +134,7 @@ webhooks: - CREATE - UPDATE resources: - - clusterclasses + - machinesets sideEffects: None - admissionReviewVersions: - v1 @@ -207,7 +206,6 @@ webhooks: apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration metadata: - creationTimestamp: null name: validating-webhook-configuration webhooks: - admissionReviewVersions: @@ -217,10 +215,10 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-cluster-x-k8s-io-v1beta1-machine + path: /validate-cluster-x-k8s-io-v1beta1-cluster failurePolicy: Fail matchPolicy: Equivalent - name: validation.machine.cluster.x-k8s.io + name: validation.cluster.cluster.x-k8s.io rules: - apiGroups: - cluster.x-k8s.io @@ -229,8 +227,9 @@ webhooks: operations: - CREATE - UPDATE + - DELETE resources: - - machines + - clusters sideEffects: None - admissionReviewVersions: - v1 @@ -239,10 +238,10 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-cluster-x-k8s-io-v1beta1-machinedeployment + path: /validate-cluster-x-k8s-io-v1beta1-clusterclass failurePolicy: Fail matchPolicy: Equivalent - name: validation.machinedeployment.cluster.x-k8s.io + name: validation.clusterclass.cluster.x-k8s.io rules: - apiGroups: - cluster.x-k8s.io @@ -251,8 +250,9 @@ webhooks: operations: - CREATE - UPDATE + - DELETE resources: - - machinedeployments + - clusterclasses sideEffects: None - admissionReviewVersions: - v1 @@ -261,10 +261,10 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-cluster-x-k8s-io-v1beta1-machinehealthcheck + path: /validate-cluster-x-k8s-io-v1beta1-machine failurePolicy: Fail matchPolicy: Equivalent - name: validation.machinehealthcheck.cluster.x-k8s.io + name: validation.machine.cluster.x-k8s.io rules: - apiGroups: - cluster.x-k8s.io @@ -274,7 +274,7 @@ webhooks: - CREATE - UPDATE resources: - - machinehealthchecks + - machines sideEffects: None - admissionReviewVersions: - v1 @@ -283,10 +283,10 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-cluster-x-k8s-io-v1beta1-machineset + path: /validate-cluster-x-k8s-io-v1beta1-machinedeployment failurePolicy: Fail matchPolicy: Equivalent - name: validation.machineset.cluster.x-k8s.io + name: validation.machinedeployment.cluster.x-k8s.io rules: - apiGroups: - cluster.x-k8s.io @@ -296,7 +296,7 @@ webhooks: - CREATE - UPDATE resources: - - machinesets + - machinedeployments sideEffects: None - admissionReviewVersions: - v1 @@ -305,10 +305,10 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-cluster-x-k8s-io-v1beta1-cluster + path: /validate-cluster-x-k8s-io-v1beta1-machinehealthcheck failurePolicy: Fail matchPolicy: Equivalent - name: validation.cluster.cluster.x-k8s.io + name: validation.machinehealthcheck.cluster.x-k8s.io rules: - apiGroups: - cluster.x-k8s.io @@ -317,9 +317,8 @@ webhooks: operations: - CREATE - UPDATE - - DELETE resources: - - clusters + - machinehealthchecks sideEffects: None - admissionReviewVersions: - v1 @@ -328,10 +327,10 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-cluster-x-k8s-io-v1beta1-clusterclass + path: /validate-cluster-x-k8s-io-v1beta1-machineset failurePolicy: Fail matchPolicy: Equivalent - name: validation.clusterclass.cluster.x-k8s.io + name: validation.machineset.cluster.x-k8s.io rules: - apiGroups: - cluster.x-k8s.io 
@@ -340,9 +339,8 @@ webhooks: operations: - CREATE - UPDATE - - DELETE resources: - - clusterclasses + - machinesets sideEffects: None - admissionReviewVersions: - v1 @@ -417,7 +415,29 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-ipam-cluster-x-k8s-io-v1alpha1-ipaddress + path: /validate-addons-cluster-x-k8s-io-v1beta1-clusterresourcesetbinding + failurePolicy: Fail + matchPolicy: Equivalent + name: validation.clusterresourcesetbinding.addons.cluster.x-k8s.io + rules: + - apiGroups: + - addons.cluster.x-k8s.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - clusterresourcesetbindings + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-ipam-cluster-x-k8s-io-v1beta1-ipaddress failurePolicy: Fail matchPolicy: Equivalent name: validation.ipaddress.ipam.cluster.x-k8s.io @@ -425,7 +445,7 @@ webhooks: - apiGroups: - ipam.cluster.x-k8s.io apiVersions: - - v1alpha1 + - v1beta1 operations: - CREATE - UPDATE @@ -440,7 +460,7 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-ipam-cluster-x-k8s-io-v1alpha1-ipaddressclaim + path: /validate-ipam-cluster-x-k8s-io-v1beta1-ipaddressclaim failurePolicy: Fail matchPolicy: Equivalent name: validation.ipaddressclaim.ipam.cluster.x-k8s.io @@ -448,7 +468,7 @@ webhooks: - apiGroups: - ipam.cluster.x-k8s.io apiVersions: - - v1alpha1 + - v1beta1 operations: - CREATE - UPDATE diff --git a/controllers/alias.go b/controllers/alias.go index 2e6af641de4e..fba47941cb7e 100644 --- a/controllers/alias.go +++ b/controllers/alias.go @@ -18,6 +18,7 @@ package controllers import ( "context" + "time" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -41,8 +42,9 @@ import ( // ClusterReconciler reconciles a Cluster object. type ClusterReconciler struct { - Client client.Client - APIReader client.Reader + Client client.Client + UnstructuredCachingClient client.Client + APIReader client.Reader // WatchFilterValue is the label value used to filter events prior to reconciliation. WatchFilterValue string @@ -50,54 +52,68 @@ type ClusterReconciler struct { func (r *ClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { return (&clustercontroller.Reconciler{ - Client: r.Client, - APIReader: r.APIReader, - WatchFilterValue: r.WatchFilterValue, + Client: r.Client, + UnstructuredCachingClient: r.UnstructuredCachingClient, + APIReader: r.APIReader, + WatchFilterValue: r.WatchFilterValue, }).SetupWithManager(ctx, mgr, options) } // MachineReconciler reconciles a Machine object. type MachineReconciler struct { - Client client.Client - APIReader client.Reader - Tracker *remote.ClusterCacheTracker + Client client.Client + UnstructuredCachingClient client.Client + APIReader client.Reader + Tracker *remote.ClusterCacheTracker // WatchFilterValue is the label value used to filter events prior to reconciliation. WatchFilterValue string + + // NodeDrainClientTimeout timeout of the client used for draining nodes. 
+ NodeDrainClientTimeout time.Duration } func (r *MachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { return (&machinecontroller.Reconciler{ - Client: r.Client, - APIReader: r.APIReader, - Tracker: r.Tracker, - WatchFilterValue: r.WatchFilterValue, + Client: r.Client, + UnstructuredCachingClient: r.UnstructuredCachingClient, + APIReader: r.APIReader, + Tracker: r.Tracker, + WatchFilterValue: r.WatchFilterValue, + NodeDrainClientTimeout: r.NodeDrainClientTimeout, }).SetupWithManager(ctx, mgr, options) } // MachineSetReconciler reconciles a MachineSet object. type MachineSetReconciler struct { - Client client.Client - APIReader client.Reader - Tracker *remote.ClusterCacheTracker + Client client.Client + UnstructuredCachingClient client.Client + APIReader client.Reader + Tracker *remote.ClusterCacheTracker // WatchFilterValue is the label value used to filter events prior to reconciliation. WatchFilterValue string + + // Deprecated: DeprecatedInfraMachineNaming. Name the InfraStructureMachines after the InfraMachineTemplate. + DeprecatedInfraMachineNaming bool } func (r *MachineSetReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { return (&machinesetcontroller.Reconciler{ - Client: r.Client, - APIReader: r.APIReader, - Tracker: r.Tracker, - WatchFilterValue: r.WatchFilterValue, + Client: r.Client, + UnstructuredCachingClient: r.UnstructuredCachingClient, + APIReader: r.APIReader, + Tracker: r.Tracker, + WatchFilterValue: r.WatchFilterValue, + DeprecatedInfraMachineNaming: r.DeprecatedInfraMachineNaming, }).SetupWithManager(ctx, mgr, options) } // MachineDeploymentReconciler reconciles a MachineDeployment object. type MachineDeploymentReconciler struct { - Client client.Client - APIReader client.Reader + Client client.Client + UnstructuredCachingClient client.Client + APIReader client.Reader // WatchFilterValue is the label value used to filter events prior to reconciliation. WatchFilterValue string @@ -105,9 +121,10 @@ type MachineDeploymentReconciler struct { func (r *MachineDeploymentReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { return (&machinedeploymentcontroller.Reconciler{ - Client: r.Client, - APIReader: r.APIReader, - WatchFilterValue: r.WatchFilterValue, + Client: r.Client, + UnstructuredCachingClient: r.UnstructuredCachingClient, + APIReader: r.APIReader, + WatchFilterValue: r.WatchFilterValue, }).SetupWithManager(ctx, mgr, options) } @@ -130,7 +147,8 @@ func (r *MachineHealthCheckReconciler) SetupWithManager(ctx context.Context, mgr // ClusterTopologyReconciler reconciles a managed topology for a Cluster object. type ClusterTopologyReconciler struct { - Client client.Client + Client client.Client + Tracker *remote.ClusterCacheTracker // APIReader is used to list MachineSets directly via the API server to avoid // race conditions caused by an outdated cache. APIReader client.Reader @@ -149,6 +167,7 @@ func (r *ClusterTopologyReconciler) SetupWithManager(ctx context.Context, mgr ct return (&clustertopologycontroller.Reconciler{ Client: r.Client, APIReader: r.APIReader, + Tracker: r.Tracker, RuntimeClient: r.RuntimeClient, UnstructuredCachingClient: r.UnstructuredCachingClient, WatchFilterValue: r.WatchFilterValue, @@ -197,8 +216,11 @@ func (r *MachineSetTopologyReconciler) SetupWithManager(ctx context.Context, mgr // ClusterClassReconciler reconciles the ClusterClass object. 
type ClusterClassReconciler struct { - Client client.Client - APIReader client.Reader + // internalReconciler is used to store the reconciler after SetupWithManager + // so that the Reconcile function can work. + internalReconciler *clusterclasscontroller.Reconciler + + Client client.Client // RuntimeClient is a client for calling runtime extensions. RuntimeClient runtimeclient.Client @@ -212,11 +234,20 @@ type ClusterClassReconciler struct { } func (r *ClusterClassReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { - return (&clusterclasscontroller.Reconciler{ + r.internalReconciler = &clusterclasscontroller.Reconciler{ Client: r.Client, - APIReader: r.APIReader, RuntimeClient: r.RuntimeClient, UnstructuredCachingClient: r.UnstructuredCachingClient, WatchFilterValue: r.WatchFilterValue, - }).SetupWithManager(ctx, mgr, options) + } + return r.internalReconciler.SetupWithManager(ctx, mgr, options) +} + +// Reconcile can be used to reconcile a ClusterClass. +// Before it can be used, all fields of the ClusterClassReconciler have to be set +// and SetupWithManager has to be called. +// This method can be used when testing the behavior of the desired state computation of +// the Cluster topology controller (because that requires a reconciled ClusterClass). +func (r *ClusterClassReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + return r.internalReconciler.Reconcile(ctx, req) } diff --git a/controllers/external/tracker.go b/controllers/external/tracker.go index d482d89f4d27..26a2a4206fd9 100644 --- a/controllers/external/tracker.go +++ b/controllers/external/tracker.go @@ -17,12 +17,13 @@ limitations under the License. package external import ( + "fmt" "sync" "github.com/go-logr/logr" "github.com/pkg/errors" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" @@ -36,10 +37,11 @@ type ObjectTracker struct { m sync.Map Controller controller.Controller + Cache cache.Cache } // Watch uses the controller to issue a Watch only if the object hasn't been seen before. -func (o *ObjectTracker) Watch(log logr.Logger, obj runtime.Object, handler handler.EventHandler, p ...predicate.Predicate) error { +func (o *ObjectTracker) Watch(log logr.Logger, obj client.Object, handler handler.EventHandler, p ...predicate.Predicate) error { // Consider this a no-op if the controller isn't present. 
if o.Controller == nil { return nil @@ -51,18 +53,16 @@ return nil } - u := &unstructured.Unstructured{} - u.SetGroupVersionKind(gvk) - - log.Info("Adding watcher on external object", "groupVersionKind", gvk.String()) - err := o.Controller.Watch( - &source.Kind{Type: u}, + log.Info(fmt.Sprintf("Adding watch on external object %q", gvk.String())) + err := o.Controller.Watch(source.Kind( + o.Cache, + obj.DeepCopyObject().(client.Object), handler, append(p, predicates.ResourceNotPaused(log))..., - ) + )) if err != nil { o.m.Delete(key) - return errors.Wrapf(err, "failed to add watcher on external object %q", gvk.String()) + return errors.Wrapf(err, "failed to add watch on external object %q", gvk.String()) } return nil } diff --git a/controllers/external/tracker_test.go b/controllers/external/tracker_test.go index fd4c7d2dd9cf..553edb220513 100644 --- a/controllers/external/tracker_test.go +++ b/controllers/external/tracker_test.go @@ -24,9 +24,7 @@ import ( "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/source" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -55,7 +53,7 @@ func newWatchCountController(raiseError bool) *watchCountController { } } -func (c *watchCountController) Watch(_ source.Source, _ handler.EventHandler, _ ...predicate.Predicate) error { +func (c *watchCountController) Watch(_ source.Source) error { c.count++ if c.raiseError { return errors.New("injected failure") diff --git a/controllers/external/util.go b/controllers/external/util.go index 89647454417d..4efdb9c133f2 100644 --- a/controllers/external/util.go +++ b/controllers/external/util.go @@ -70,6 +70,10 @@ type CreateFromTemplateInput struct { // Namespace is the Kubernetes namespace the cloned object should be created into. Namespace string + // Name is used as the name of the generated object, if set. + // If it isn't set, the template name will be used as a prefix to generate a name instead. + Name string + // ClusterName is the cluster this object is linked to. ClusterName string @@ -96,6 +100,7 @@ func CreateFromTemplate(ctx context.Context, in *CreateFromTemplateInput) (*core Template: from, TemplateRef: in.TemplateRef, Namespace: in.Namespace, + Name: in.Name, ClusterName: in.ClusterName, OwnerRef: in.OwnerRef, Labels: in.Labels, @@ -125,6 +130,10 @@ type GenerateTemplateInput struct { // Namespace is the Kubernetes namespace the cloned object should be created into. Namespace string + // Name is used as the name of the generated object, if set. + // If it isn't set, the template name will be used as a prefix to generate a name instead. + Name string + // ClusterName is the cluster this object is linked to. ClusterName string @@ -156,7 +165,10 @@ func GenerateTemplate(in *GenerateTemplateInput) (*unstructured.Unstructured, er to.SetFinalizers(nil) to.SetUID("") to.SetSelfLink("") - to.SetName(names.SimpleNameGenerator.GenerateName(in.Template.GetName() + "-")) + to.SetName(in.Name) + if to.GetName() == "" { + to.SetName(names.SimpleNameGenerator.GenerateName(in.Template.GetName() + "-")) + } to.SetNamespace(in.Namespace) // Set annotations.
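Note: CreateFromTemplateInput and GenerateTemplateInput gain an optional Name field above. When set, it is used verbatim as the name of the cloned object; when empty, the previous behavior is kept and the template name plus a random suffix is generated. A minimal sketch of how a caller might use this, assuming a hypothetical controller-runtime client, template reference, and names:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"sigs.k8s.io/cluster-api/controllers/external"
)

// cloneInfraObject clones an infrastructure template into a concrete object
// with a deterministic name instead of a generated one.
func cloneInfraObject(ctx context.Context, c client.Client, templateRef *corev1.ObjectReference) (*corev1.ObjectReference, error) {
	return external.CreateFromTemplate(ctx, &external.CreateFromTemplateInput{
		Client:      c,
		TemplateRef: templateRef,
		Namespace:   "default", // hypothetical namespace
		// Name is used verbatim; leave it empty to fall back to the old
		// "<template-name>-<random-suffix>" generation.
		Name:        "cluster-a-md-0-infra",
		ClusterName: "cluster-a", // hypothetical cluster name
	})
}

Deterministic names make repeated clone attempts idempotent across reconciles, which is presumably the motivation for the new field.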
diff --git a/controllers/external/util_test.go b/controllers/external/util_test.go index 9d9e94d85f82..f10eefab3687 100644 --- a/controllers/external/util_test.go +++ b/controllers/external/util_test.go @@ -64,8 +64,8 @@ func TestGetResourceFound(t *testing.T) { fakeClient := fake.NewClientBuilder().WithObjects(testResource.DeepCopy()).Build() got, err := Get(ctx, fakeClient, testResourceReference, metav1.NamespaceDefault) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(got).To(Equal(testResource)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).To(BeComparableTo(testResource)) } func TestGetResourceNotFound(t *testing.T) { @@ -158,12 +158,12 @@ func TestCloneTemplateResourceFound(t *testing.T) { expectedKind := "Purple" expectedAPIVersion := templateAPIVersion expectedMetadata, ok, err := unstructured.NestedMap(template.UnstructuredContent(), "spec", "template", "metadata") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(ok).To(BeTrue()) g.Expect(expectedMetadata).NotTo(BeEmpty()) expectedSpec, ok, err := unstructured.NestedMap(template.UnstructuredContent(), "spec", "template", "spec") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(ok).To(BeTrue()) g.Expect(expectedSpec).NotTo(BeEmpty()) @@ -184,7 +184,7 @@ func TestCloneTemplateResourceFound(t *testing.T) { clusterv1.TemplateClonedFromNameAnnotation: "should-be-overwritten", }, }) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(ref).NotTo(BeNil()) g.Expect(ref.Kind).To(Equal(expectedKind)) g.Expect(ref.APIVersion).To(Equal(expectedAPIVersion)) @@ -201,9 +201,9 @@ func TestCloneTemplateResourceFound(t *testing.T) { g.Expect(clone.GetOwnerReferences()).To(ContainElement(owner)) cloneSpec, ok, err := unstructured.NestedMap(clone.UnstructuredContent(), "spec") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(ok).To(BeTrue()) - g.Expect(cloneSpec).To(Equal(expectedSpec)) + g.Expect(cloneSpec).To(BeComparableTo(expectedSpec)) cloneLabels := clone.GetLabels() g.Expect(cloneLabels).To(HaveKeyWithValue(clusterv1.ClusterNameLabel, testClusterName)) @@ -255,7 +255,7 @@ func TestCloneTemplateResourceFoundNoOwner(t *testing.T) { expectedLabels := map[string]string{clusterv1.ClusterNameLabel: testClusterName} expectedSpec, ok, err := unstructured.NestedMap(template.UnstructuredContent(), "spec", "template", "spec") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(ok).To(BeTrue()) g.Expect(expectedSpec).NotTo(BeEmpty()) @@ -265,14 +265,15 @@ func TestCloneTemplateResourceFoundNoOwner(t *testing.T) { Client: fakeClient, TemplateRef: templateRef, Namespace: metav1.NamespaceDefault, + Name: "object-name", ClusterName: testClusterName, }) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(ref).NotTo(BeNil()) g.Expect(ref.Kind).To(Equal(expectedKind)) g.Expect(ref.APIVersion).To(Equal(expectedAPIVersion)) g.Expect(ref.Namespace).To(Equal(metav1.NamespaceDefault)) - g.Expect(ref.Name).To(HavePrefix(templateRef.Name)) + g.Expect(ref.Name).To(Equal("object-name")) clone := &unstructured.Unstructured{} clone.SetKind(expectedKind) @@ -282,9 +283,9 @@ func TestCloneTemplateResourceFoundNoOwner(t *testing.T) { g.Expect(clone.GetLabels()).To(Equal(expectedLabels)) g.Expect(clone.GetOwnerReferences()).To(BeEmpty()) cloneSpec, ok, err := unstructured.NestedMap(clone.UnstructuredContent(), "spec") - g.Expect(err).NotTo(HaveOccurred()) + 
g.Expect(err).ToNot(HaveOccurred()) g.Expect(ok).To(BeTrue()) - g.Expect(cloneSpec).To(Equal(expectedSpec)) + g.Expect(cloneSpec).To(BeComparableTo(expectedSpec)) } func TestCloneTemplateMissingSpecTemplate(t *testing.T) { diff --git a/controllers/noderefutil/providerid.go b/controllers/noderefutil/providerid.go deleted file mode 100644 index 6c6dede690b8..000000000000 --- a/controllers/noderefutil/providerid.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package noderefutil implements NodeRef utils. -package noderefutil - -import ( - "errors" - "regexp" - "strings" -) - -var ( - // ErrEmptyProviderID means that the provider id is empty. - ErrEmptyProviderID = errors.New("providerID is empty") - - // ErrInvalidProviderID means that the provider id has an invalid form. - ErrInvalidProviderID = errors.New("providerID must be of the form :////") -) - -// ProviderID is a struct representation of a Kubernetes ProviderID. -// Format: cloudProvider://optional/segments/etc/id -type ProviderID struct { - original string - cloudProvider string - id string -} - -/* -- must start with at least one non-colon -- followed by :// -- followed by any number of characters -- must end with a non-slash. -*/ -var providerIDRegex = regexp.MustCompile("^[^:]+://.*[^/]$") - -// NewProviderID parses the input string and returns a new ProviderID. -func NewProviderID(id string) (*ProviderID, error) { - if id == "" { - return nil, ErrEmptyProviderID - } - - if !providerIDRegex.MatchString(id) { - return nil, ErrInvalidProviderID - } - - colonIndex := strings.Index(id, ":") - cloudProvider := id[0:colonIndex] - - lastSlashIndex := strings.LastIndex(id, "/") - instance := id[lastSlashIndex+1:] - - res := &ProviderID{ - original: id, - cloudProvider: cloudProvider, - id: instance, - } - - if !res.Validate() { - return nil, ErrInvalidProviderID - } - - return res, nil -} - -// CloudProvider returns the cloud provider portion of the ProviderID. -func (p *ProviderID) CloudProvider() string { - return p.cloudProvider -} - -// ID returns the identifier portion of the ProviderID. -func (p *ProviderID) ID() string { - return p.id -} - -// Equals returns true if this ProviderID string matches another ProviderID string. -func (p *ProviderID) Equals(o *ProviderID) bool { - return p.String() == o.String() -} - -// String returns the string representation of this object. -func (p ProviderID) String() string { - return p.original -} - -// Validate returns true if the provider id is valid. -func (p *ProviderID) Validate() bool { - return p.CloudProvider() != "" && p.ID() != "" -} - -// IndexKey returns the required level of uniqueness -// to represent and index machines uniquely from their node providerID. 
-func (p *ProviderID) IndexKey() string { - return p.String() -} diff --git a/controllers/noderefutil/providerid_test.go b/controllers/noderefutil/providerid_test.go deleted file mode 100644 index 77a77a831a4f..000000000000 --- a/controllers/noderefutil/providerid_test.go +++ /dev/null @@ -1,168 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package noderefutil - -import ( - "testing" - - . "github.com/onsi/gomega" -) - -const aws = "aws" -const azure = "azure" - -func TestNewProviderID(t *testing.T) { - tests := []struct { - name string - input string - expectedID string - }{ - { - name: "2 slashes after colon, one segment", - input: "aws://instance-id", - expectedID: "instance-id", - }, - { - name: "more than 2 slashes after colon, one segment", - input: "aws:////instance-id", - expectedID: "instance-id", - }, - { - name: "multiple filled-in segments (aws format)", - input: "aws:///zone/instance-id", - expectedID: "instance-id", - }, - { - name: "multiple filled-in segments", - input: "aws://bar/baz/instance-id", - expectedID: "instance-id", - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - g := NewWithT(t) - - id, err := NewProviderID(tc.input) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(id.CloudProvider()).To(Equal(aws)) - g.Expect(id.ID()).To(Equal(tc.expectedID)) - }) - } -} - -func TestInvalidProviderID(t *testing.T) { - testCases := []struct { - name string - input string - err error - }{ - { - name: "empty id", - input: "", - err: ErrEmptyProviderID, - }, - { - name: "only empty segments", - input: "aws:///////", - err: ErrInvalidProviderID, - }, - { - name: "missing cloud provider", - input: "://instance-id", - err: ErrInvalidProviderID, - }, - { - name: "missing cloud provider and colon", - input: "//instance-id", - err: ErrInvalidProviderID, - }, - { - name: "missing cloud provider, colon, one leading slash", - input: "/instance-id", - err: ErrInvalidProviderID, - }, - { - name: "just an id", - input: "instance-id", - err: ErrInvalidProviderID, - }, - } - - for _, test := range testCases { - t.Run(test.name, func(t *testing.T) { - g := NewWithT(t) - - _, err := NewProviderID(test.input) - g.Expect(err).To(MatchError(test.err)) - }) - } -} - -func TestProviderIDEquals(t *testing.T) { - g := NewWithT(t) - - inputAWS1 := "aws:////instance-id1" - parsedAWS1, err := NewProviderID(inputAWS1) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(parsedAWS1.String()).To(Equal(inputAWS1)) - g.Expect(parsedAWS1.ID()).To(Equal("instance-id1")) - g.Expect(parsedAWS1.CloudProvider()).To(Equal(aws)) - - inputAWS2 := "aws:///us-west-1/instance-id1" - parsedAWS2, err := NewProviderID(inputAWS2) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(parsedAWS2.String()).To(Equal(inputAWS2)) - g.Expect(parsedAWS2.ID()).To(Equal("instance-id1")) - g.Expect(parsedAWS2.CloudProvider()).To(Equal(aws)) - - // Test for inequality - g.Expect(parsedAWS1.Equals(parsedAWS2)).To(BeFalse()) - - inputAzure1 := 
"azure:///subscriptions/4920076a-ba9f-11ec-8422-0242ac120002/resourceGroups/default-template/providers/Microsoft.Compute/virtualMachines/default-template-control-plane-fhrvh" - parsedAzure1, err := NewProviderID(inputAzure1) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(parsedAzure1.String()).To(Equal(inputAzure1)) - g.Expect(parsedAzure1.ID()).To(Equal("default-template-control-plane-fhrvh")) - g.Expect(parsedAzure1.CloudProvider()).To(Equal(azure)) - - inputAzure2 := inputAzure1 - parsedAzure2, err := NewProviderID(inputAzure2) - g.Expect(err).NotTo(HaveOccurred()) - - // Test for equality - g.Expect(parsedAzure1.Equals(parsedAzure2)).To(BeTrue()) - - // Here we ensure that two different ProviderID strings that happen to have the same 'ID' are not equal - // We use Azure VMSS as an example, two different '0' VMs in different pools: k8s-pool1-vmss, and k8s-pool2-vmss - inputAzureVMFromOneVMSS := "azure:///subscriptions/4920076a-ba9f-11ec-8422-0242ac120002/resourceGroups/default-template/providers/Microsoft.Compute/virtualMachineScaleSets/k8s-pool1-vmss/virtualMachines/0" - inputAzureVMFromAnotherVMSS := "azure:///subscriptions/4920076a-ba9f-11ec-8422-0242ac120002/resourceGroups/default-template/providers/Microsoft.Compute/virtualMachineScaleSets/k8s-pool2-vmss/virtualMachines/0" - parsedAzureVMFromOneVMSS, err := NewProviderID(inputAzureVMFromOneVMSS) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(parsedAzureVMFromOneVMSS.String()).To(Equal(inputAzureVMFromOneVMSS)) - g.Expect(parsedAzureVMFromOneVMSS.ID()).To(Equal("0")) - g.Expect(parsedAzureVMFromOneVMSS.CloudProvider()).To(Equal(azure)) - - parsedAzureVMFromAnotherVMSS, err := NewProviderID(inputAzureVMFromAnotherVMSS) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(parsedAzureVMFromAnotherVMSS.String()).To(Equal(inputAzureVMFromAnotherVMSS)) - g.Expect(parsedAzureVMFromAnotherVMSS.ID()).To(Equal("0")) - g.Expect(parsedAzureVMFromAnotherVMSS.CloudProvider()).To(Equal(azure)) - - // Test for inequality - g.Expect(parsedAzureVMFromOneVMSS.Equals(parsedAzureVMFromAnotherVMSS)).To(BeFalse()) -} diff --git a/controllers/noderefutil/util.go b/controllers/noderefutil/util.go index d96e25c64bf1..de40751fde4b 100644 --- a/controllers/noderefutil/util.go +++ b/controllers/noderefutil/util.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package noderefutil implements noderef utilities. 
package noderefutil import ( diff --git a/controllers/noderefutil/util_test.go b/controllers/noderefutil/util_test.go index a071e27efb4d..0e35e2aebed9 100644 --- a/controllers/noderefutil/util_test.go +++ b/controllers/noderefutil/util_test.go @@ -222,7 +222,7 @@ func TestGetReadyCondition(t *testing.T) { t.Run(test.name, func(t *testing.T) { g := NewWithT(t) - g.Expect(GetReadyCondition(test.nodeStatus)).To(Equal(test.expectedCondition)) + g.Expect(GetReadyCondition(test.nodeStatus)).To(BeComparableTo(test.expectedCondition)) }) } } diff --git a/controllers/remote/cluster_cache_healthcheck_test.go b/controllers/remote/cluster_cache_healthcheck_test.go index f4e3e5a9e948..ce55499b01d4 100644 --- a/controllers/remote/cluster_cache_healthcheck_test.go +++ b/controllers/remote/cluster_cache_healthcheck_test.go @@ -28,9 +28,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" - "k8s.io/klog/v2/klogr" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util" @@ -58,10 +59,12 @@ func TestClusterCacheHealthCheck(t *testing.T) { t.Log("Setting up a new manager") var err error mgr, err = manager.New(env.Config, manager.Options{ - Scheme: scheme.Scheme, - MetricsBindAddress: "0", + Scheme: scheme.Scheme, + Metrics: metricsserver.Options{ + BindAddress: "0", + }, }) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) mgrContext, mgrCancel = context.WithCancel(ctx) t.Log("Starting the manager") @@ -73,16 +76,15 @@ func TestClusterCacheHealthCheck(t *testing.T) { k8sClient = mgr.GetClient() t.Log("Setting up a ClusterCacheTracker") - log := klogr.New() cct, err = NewClusterCacheTracker(mgr, ClusterCacheTrackerOptions{ - Log: &log, - Indexes: DefaultIndexes, + Log: &ctrl.Log, + Indexes: []Index{NodeProviderIDIndex}, }) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) t.Log("Creating a namespace for the test") ns, err := env.CreateNamespace(ctx, "cluster-cache-health-test") - g.Expect(err).To(BeNil()) + g.Expect(err).ToNot(HaveOccurred()) t.Log("Creating a test cluster") testCluster := &clusterv1.Cluster{ @@ -130,9 +132,13 @@ func TestClusterCacheHealthCheck(t *testing.T) { ctx, cancel := context.WithCancel(ctx) defer cancel() + httpClient, err := rest.HTTPClientFor(env.Config) + g.Expect(err).ToNot(HaveOccurred()) + go cct.healthCheckCluster(ctx, &healthCheckInput{ cluster: testClusterKey, cfg: env.Config, + httpClient: httpClient, interval: testPollInterval, requestTimeout: testPollTimeout, unhealthyThreshold: testUnhealthyThreshold, @@ -146,6 +152,36 @@ func TestClusterCacheHealthCheck(t *testing.T) { }, 5*time.Second, 1*time.Second).Should(BeTrue()) }) + t.Run("during creation of a new cluster accessor", func(t *testing.T) { + g := NewWithT(t) + ns := setup(t, g) + defer teardown(t, g, ns) + // Create a context with a timeout to cancel the healthcheck after some time + ctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + // Delete the cluster accessor and lock the cluster to simulate creation of a new cluster accessor + cct.deleteAccessor(ctx, testClusterKey) + g.Expect(cct.clusterLock.TryLock(testClusterKey)).To(BeTrue()) + startHealthCheck := time.Now() + + httpClient, err := rest.HTTPClientFor(env.Config) + g.Expect(err).ToNot(HaveOccurred()) + 
cct.healthCheckCluster(ctx, &healthCheckInput{ + cluster: testClusterKey, + cfg: env.Config, + httpClient: httpClient, + interval: testPollInterval, + requestTimeout: testPollTimeout, + unhealthyThreshold: testUnhealthyThreshold, + path: "/", + }) + timeElapsedForHealthCheck := time.Since(startHealthCheck) + // If the duration is shorter than the timeout, we know that the healthcheck wasn't requeued properly. + g.Expect(timeElapsedForHealthCheck).Should(BeNumerically(">=", time.Second)) + // The healthcheck should be aborted by the timeout of the context. + g.Expect(ctx.Done()).Should(BeClosed()) + }) + t.Run("with an invalid path", func(t *testing.T) { g := NewWithT(t) ns := setup(t, g) @@ -154,10 +190,13 @@ ctx, cancel := context.WithCancel(ctx) defer cancel() + httpClient, err := rest.HTTPClientFor(env.Config) + g.Expect(err).ToNot(HaveOccurred()) go cct.healthCheckCluster(ctx, &healthCheckInput{ cluster: testClusterKey, cfg: env.Config, + httpClient: httpClient, interval: testPollInterval, requestTimeout: testPollTimeout, unhealthyThreshold: testUnhealthyThreshold, @@ -181,17 +220,20 @@ // Set the host to a random free port on localhost addr, err := net.ResolveTCPAddr("tcp", "localhost:0") - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) l, err := net.ListenTCP("tcp", addr) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(l.Close()).To(Succeed()) config := rest.CopyConfig(env.Config) config.Host = fmt.Sprintf("http://127.0.0.1:%d", l.Addr().(*net.TCPAddr).Port) + httpClient, err := rest.HTTPClientFor(env.Config) + g.Expect(err).ToNot(HaveOccurred()) go cct.healthCheckCluster(ctx, &healthCheckInput{ cluster: testClusterKey, cfg: config, + httpClient: httpClient, interval: testPollInterval, requestTimeout: testPollTimeout, unhealthyThreshold: testUnhealthyThreshold, diff --git a/controllers/remote/cluster_cache_reconciler_test.go b/controllers/remote/cluster_cache_reconciler_test.go index c9567cbf0cd7..ce0586015707 100644 --- a/controllers/remote/cluster_cache_reconciler_test.go +++ b/controllers/remote/cluster_cache_reconciler_test.go @@ -28,6 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/manager" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util" @@ -71,7 +72,7 @@ func TestClusterCacheReconciler(t *testing.T) { t.Log("Creating a clusterAccessor for the cluster") _, err := cct.GetClient(ctx, testClusterKey) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) } setup := func(t *testing.T, g *WithT) *corev1.Namespace { @@ -80,14 +81,16 @@ t.Log("Setting up a new manager") var err error mgr, err = manager.New(env.Config, manager.Options{ - Scheme: scheme.Scheme, - MetricsBindAddress: "0", + Scheme: scheme.Scheme, + Metrics: metricsserver.Options{ + BindAddress: "0", + }, }) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) t.Log("Setting up a ClusterCacheTracker") cct, err = NewClusterCacheTracker(mgr, ClusterCacheTrackerOptions{}) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) t.Log("Creating the ClusterCacheReconciler") r := &ClusterCacheReconciler{ @@ -107,7 +110,7 @@ func TestClusterCacheReconciler(t *testing.T)
{ t.Log("Creating a namespace for the test") ns, err := env.CreateNamespace(ctx, "cluster-cache-test") - g.Expect(err).To(BeNil()) + g.Expect(err).ToNot(HaveOccurred()) t.Log("Creating clusters to test with") createAndWatchCluster("cluster-1", ns, g) diff --git a/controllers/remote/cluster_cache_tracker.go b/controllers/remote/cluster_cache_tracker.go index f72e9a8b2f52..4a7b71a47c7e 100644 --- a/controllers/remote/cluster_cache_tracker.go +++ b/controllers/remote/cluster_cache_tracker.go @@ -18,7 +18,9 @@ package remote import ( "context" + "crypto/rsa" "fmt" + "net/http" "os" "sync" "time" @@ -47,6 +49,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util/certs" "sigs.k8s.io/cluster-api/util/conditions" ) @@ -66,8 +69,15 @@ var ErrClusterLocked = errors.New("cluster is locked already") type ClusterCacheTracker struct { log logr.Logger clientUncachedObjects []client.Object - client client.Client - scheme *runtime.Scheme + + client client.Client + + // SecretCachingClient is a client which caches secrets. + // If set it will be used to read the kubeconfig secret. + // Otherwise the default client from the manager will be used. + secretCachingClient client.Client + + scheme *runtime.Scheme // clusterAccessorsLock is used to lock the access to the clusterAccessors map. clusterAccessorsLock sync.RWMutex @@ -79,6 +89,10 @@ type ClusterCacheTracker struct { indexes []Index + // controllerName is the name of the controller. + // This is used to calculate the user agent string. + controllerName string + // controllerPodMetadata is the Pod metadata of the controller using this ClusterCacheTracker. // This is only set when the POD_NAMESPACE, POD_NAME and POD_UID environment variables are set. // This information will be used to detected if the controller is running on a workload cluster, so @@ -89,6 +103,11 @@ type ClusterCacheTracker struct { // ClusterCacheTrackerOptions defines options to configure // a ClusterCacheTracker. type ClusterCacheTrackerOptions struct { + // SecretCachingClient is a client which caches secrets. + // If set it will be used to read the kubeconfig secret. + // Otherwise the default client from the manager will be used. + SecretCachingClient client.Client + // Log is the logger used throughout the lifecycle of caches. // Defaults to a no-op logger if it's not set. Log *logr.Logger @@ -98,6 +117,11 @@ type ClusterCacheTrackerOptions struct { // Defaults to never caching ConfigMap and Secret if not set. ClientUncachedObjects []client.Object Indexes []Index + + // ControllerName is the name of the controller. + // This is used to calculate the user agent string. + // If not set, it defaults to "cluster-cache-tracker". 
+ ControllerName string } func setDefaultOptions(opts *ClusterCacheTrackerOptions) { @@ -106,6 +130,9 @@ func setDefaultOptions(opts *ClusterCacheTrackerOptions) { opts.Log = &l } + l := opts.Log.WithValues("component", "remote/clustercachetracker") + opts.Log = &l + if len(opts.ClientUncachedObjects) == 0 { opts.ClientUncachedObjects = []client.Object{ &corev1.ConfigMap{}, @@ -118,6 +145,11 @@ func setDefaultOptions(opts *ClusterCacheTrackerOptions) { func NewClusterCacheTracker(manager ctrl.Manager, options ClusterCacheTrackerOptions) (*ClusterCacheTracker, error) { setDefaultOptions(&options) + controllerName := options.ControllerName + if controllerName == "" { + controllerName = clusterCacheControllerName + } + var controllerPodMetadata *metav1.ObjectMeta podNamespace := os.Getenv("POD_NAMESPACE") podName := os.Getenv("POD_NAME") @@ -134,10 +166,12 @@ func NewClusterCacheTracker(manager ctrl.Manager, options ClusterCacheTrackerOpt } return &ClusterCacheTracker{ + controllerName: controllerName, controllerPodMetadata: controllerPodMetadata, log: *options.Log, clientUncachedObjects: options.ClientUncachedObjects, client: manager.GetClient(), + secretCachingClient: options.SecretCachingClient, scheme: manager.GetScheme(), clusterAccessors: make(map[client.ObjectKey]*clusterAccessor), clusterLock: newKeyedMutex(), @@ -147,7 +181,7 @@ func NewClusterCacheTracker(manager ctrl.Manager, options ClusterCacheTrackerOpt // GetClient returns a cached client for the given cluster. func (t *ClusterCacheTracker) GetClient(ctx context.Context, cluster client.ObjectKey) (client.Client, error) { - accessor, err := t.getClusterAccessor(ctx, cluster, t.indexes...) + accessor, err := t.getClusterAccessor(ctx, cluster) if err != nil { return nil, err } @@ -155,9 +189,14 @@ func (t *ClusterCacheTracker) GetClient(ctx context.Context, cluster client.Obje return accessor.client, nil } +// GetReader returns a cached read-only client for the given cluster. +func (t *ClusterCacheTracker) GetReader(ctx context.Context, cluster client.ObjectKey) (client.Reader, error) { + return t.GetClient(ctx, cluster) +} + // GetRESTConfig returns a cached REST config for the given cluster. func (t *ClusterCacheTracker) GetRESTConfig(ctc context.Context, cluster client.ObjectKey) (*rest.Config, error) { - accessor, err := t.getClusterAccessor(ctc, cluster, t.indexes...) + accessor, err := t.getClusterAccessor(ctc, cluster) if err != nil { return nil, err } @@ -165,12 +204,23 @@ func (t *ClusterCacheTracker) GetRESTConfig(ctc context.Context, cluster client. return accessor.config, nil } +// GetEtcdClientCertificateKey returns a cached certificate key to be used for generating certificates for accessing etcd in the given cluster. +func (t *ClusterCacheTracker) GetEtcdClientCertificateKey(ctx context.Context, cluster client.ObjectKey) (*rsa.PrivateKey, error) { + accessor, err := t.getClusterAccessor(ctx, cluster) + if err != nil { + return nil, err + } + + return accessor.etcdClientCertificateKey, nil +} + // clusterAccessor represents the combination of a delegating client, cache, and watches for a remote cluster. type clusterAccessor struct { - cache *stoppableCache - client client.Client - watches sets.Set[string] - config *rest.Config + cache *stoppableCache + client client.Client + watches sets.Set[string] + config *rest.Config + etcdClientCertificateKey *rsa.PrivateKey } // clusterAccessorExists returns true if a clusterAccessor exists for cluster. 
@@ -204,7 +254,7 @@ func (t *ClusterCacheTracker) storeAccessor(cluster client.ObjectKey, accessor * // It then falls back to create a new clusterAccessor if needed. // If there is already another go routine trying to create a clusterAccessor // for the same cluster, an error is returned. -func (t *ClusterCacheTracker) getClusterAccessor(ctx context.Context, cluster client.ObjectKey, indexes ...Index) (*clusterAccessor, error) { +func (t *ClusterCacheTracker) getClusterAccessor(ctx context.Context, cluster client.ObjectKey) (*clusterAccessor, error) { log := ctrl.LoggerFrom(ctx, "cluster", klog.KRef(cluster.Namespace, cluster.Name)) // If the clusterAccessor already exists, return early. @@ -229,7 +279,7 @@ func (t *ClusterCacheTracker) getClusterAccessor(ctx context.Context, cluster cl // We are the go routine who has to initialize the clusterAccessor. log.V(4).Info("Creating new cluster accessor") - accessor, err := t.newClusterAccessor(ctx, cluster, indexes...) + accessor, err := t.newClusterAccessor(ctx, cluster) if err != nil { return nil, errors.Wrap(err, "failed to create cluster accessor") } @@ -240,23 +290,35 @@ func (t *ClusterCacheTracker) getClusterAccessor(ctx context.Context, cluster cl } // newClusterAccessor creates a new clusterAccessor. -func (t *ClusterCacheTracker) newClusterAccessor(ctx context.Context, cluster client.ObjectKey, indexes ...Index) (*clusterAccessor, error) { +func (t *ClusterCacheTracker) newClusterAccessor(ctx context.Context, cluster client.ObjectKey) (*clusterAccessor, error) { log := ctrl.LoggerFrom(ctx) - // Get a rest config for the remote cluster - config, err := RESTConfig(ctx, clusterCacheControllerName, t.client, cluster) + // Get a rest config for the remote cluster. + // Use the secretCachingClient if set. + secretClient := t.client + if t.secretCachingClient != nil { + secretClient = t.secretCachingClient + } + config, err := RESTConfig(ctx, t.controllerName, secretClient, cluster) if err != nil { return nil, errors.Wrapf(err, "error fetching REST client config for remote cluster %q", cluster.String()) } - // Create a client and a mapper for the cluster. - c, mapper, err := t.createClient(config, cluster) + // Create a http client and a mapper for the cluster. + httpClient, mapper, err := t.createHTTPClientAndMapper(config, cluster) + if err != nil { + return nil, errors.Wrapf(err, "error creating http client and mapper for remote cluster %q", cluster.String()) + } + + // Create an uncached client for the cluster. + uncachedClient, err := t.createUncachedClient(config, cluster, httpClient, mapper) if err != nil { return nil, err } // Detect if the controller is running on the workload cluster. - runningOnCluster, err := t.runningOnWorkloadCluster(ctx, c, cluster) + // This function uses an uncached client to ensure pods aren't cached by the long-lived client. + runningOnCluster, err := t.runningOnWorkloadCluster(ctx, uncachedClient, cluster) if err != nil { return nil, err } @@ -266,7 +328,7 @@ func (t *ClusterCacheTracker) newClusterAccessor(ctx context.Context, cluster cl if runningOnCluster { inClusterConfig, err := ctrl.GetConfig() if err != nil { - return nil, errors.Wrap(err, "error creating client for self-hosted cluster") + return nil, errors.Wrapf(err, "error creating client for self-hosted cluster %q", cluster.String()) } // Use CA and Host from in-cluster config. 
@@ -274,71 +336,37 @@ func (t *ClusterCacheTracker) newClusterAccessor(ctx context.Context, cluster cl config.CAFile = inClusterConfig.CAFile config.Host = inClusterConfig.Host - // Create a new client and overwrite the previously created client. - c, mapper, err = t.createClient(config, cluster) + // Update the http client and the mapper to use in-cluster config. + httpClient, mapper, err = t.createHTTPClientAndMapper(config, cluster) if err != nil { - return nil, errors.Wrap(err, "error creating client for self-hosted cluster") + return nil, errors.Wrapf(err, "error creating http client and mapper (using in-cluster config) for remote cluster %q", cluster.String()) } + log.Info(fmt.Sprintf("Creating cluster accessor for cluster %q with in-cluster service %q", cluster.String(), config.Host)) } else { log.Info(fmt.Sprintf("Creating cluster accessor for cluster %q with the regular apiserver endpoint %q", cluster.String(), config.Host)) } - // Create the cache for the remote cluster - cacheOptions := cache.Options{ - Scheme: t.scheme, - Mapper: mapper, - } - remoteCache, err := cache.New(config, cacheOptions) + // Create a client and a cache for the cluster. + cachedClient, err := t.createCachedClient(ctx, config, cluster, httpClient, mapper) if err != nil { - return nil, errors.Wrapf(err, "error creating cache for remote cluster %q", cluster.String()) - } - - cacheCtx, cacheCtxCancel := context.WithCancel(ctx) - - // We need to be able to stop the cache's shared informers, so wrap this in a stoppableCache. - cache := &stoppableCache{ - Cache: remoteCache, - cancelFunc: cacheCtxCancel, - } - - for _, index := range indexes { - if err := cache.IndexField(ctx, index.Object, index.Field, index.ExtractValue); err != nil { - return nil, fmt.Errorf("failed to index field %s: %w", index.Field, err) - } - } - - // Start the cache!!! - go cache.Start(cacheCtx) //nolint:errcheck - - // Wait until the cache is initially synced - cacheSyncCtx, cacheSyncCtxCancel := context.WithTimeout(ctx, initialCacheSyncTimeout) - defer cacheSyncCtxCancel() - if !cache.WaitForCacheSync(cacheSyncCtx) { - cache.Stop() - return nil, fmt.Errorf("failed waiting for cache for remote cluster %v to sync: %w", cluster, cacheCtx.Err()) + return nil, err } - // Start cluster healthcheck!!! - go t.healthCheckCluster(cacheCtx, &healthCheckInput{ - cluster: cluster, - cfg: config, - }) - - delegatingClient, err := client.NewDelegatingClient(client.NewDelegatingClientInput{ - CacheReader: cache, - Client: c, - UncachedObjects: t.clientUncachedObjects, - }) + // Generating a new private key to be used for generating temporary certificates to connect to + // etcd on the target cluster. + // NOTE: Generating a private key is an expensive operation, so we store it in the cluster accessor. + etcdKey, err := certs.NewPrivateKey() if err != nil { - return nil, err + return nil, errors.Wrapf(err, "error creating etcd client key for remote cluster %q", cluster.String()) } return &clusterAccessor{ - cache: cache, - config: config, - client: delegatingClient, - watches: sets.Set[string]{}, + cache: cachedClient.Cache, + config: config, + client: cachedClient.Client, + watches: sets.Set[string]{}, + etcdClientCertificateKey: etcdKey, }, nil } @@ -368,21 +396,122 @@ func (t *ClusterCacheTracker) runningOnWorkloadCluster(ctx context.Context, c cl return t.controllerPodMetadata.UID == pod.UID, nil } -// createClient creates a client and a mapper based on a rest.Config. 
-func (t *ClusterCacheTracker) createClient(config *rest.Config, cluster client.ObjectKey) (client.Client, meta.RESTMapper, error) { +// createHTTPClientAndMapper creates a http client and a dynamic rest mapper for the given cluster, based on the rest.Config. +func (t *ClusterCacheTracker) createHTTPClientAndMapper(config *rest.Config, cluster client.ObjectKey) (*http.Client, meta.RESTMapper, error) { + // Create a http client for the cluster. + httpClient, err := rest.HTTPClientFor(config) + if err != nil { + return nil, nil, errors.Wrapf(err, "error creating client for remote cluster %q: error creating http client", cluster.String()) + } + // Create a mapper for it - mapper, err := apiutil.NewDynamicRESTMapper(config) + mapper, err := apiutil.NewDynamicRESTMapper(config, httpClient) + if err != nil { + return nil, nil, errors.Wrapf(err, "error creating client for remote cluster %q: error creating dynamic rest mapper", cluster.String()) + } + + // Verify if we can get a rest mapping from the workload cluster apiserver. + // Note: This also checks if the apiserver is up in general. We do this already here + // to avoid further effort creating a cache and a client and to produce a clearer error message. + _, err = mapper.RESTMapping(corev1.SchemeGroupVersion.WithKind("Node").GroupKind(), corev1.SchemeGroupVersion.Version) + if err != nil { + return nil, nil, errors.Wrapf(err, "error creating client for remote cluster %q: error getting rest mapping", cluster.String()) + } + + return httpClient, mapper, nil +} + +// createUncachedClient creates an uncached client for the given cluster, based on the rest.Config. +func (t *ClusterCacheTracker) createUncachedClient(config *rest.Config, cluster client.ObjectKey, httpClient *http.Client, mapper meta.RESTMapper) (client.Client, error) { + // Create the uncached client for the remote cluster + uncachedClient, err := client.New(config, client.Options{ + Scheme: t.scheme, + Mapper: mapper, + HTTPClient: httpClient, + }) + if err != nil { + return nil, errors.Wrapf(err, "error creating uncached client for remote cluster %q", cluster.String()) + } + + return uncachedClient, nil +} + +type cachedClientOutput struct { + Client client.Client + Cache *stoppableCache +} + +// createCachedClient creates a cached client for the given cluster, based on a rest.Config. +func (t *ClusterCacheTracker) createCachedClient(ctx context.Context, config *rest.Config, cluster client.ObjectKey, httpClient *http.Client, mapper meta.RESTMapper) (*cachedClientOutput, error) { + // Create the cache for the remote cluster + cacheOptions := cache.Options{ + HTTPClient: httpClient, + Scheme: t.scheme, + Mapper: mapper, + } + remoteCache, err := cache.New(config, cacheOptions) if err != nil { - return nil, nil, errors.Wrapf(err, "error creating dynamic rest mapper for remote cluster %q", cluster.String()) + return nil, errors.Wrapf(err, "error creating cached client for remote cluster %q: error creating cache", cluster.String()) + } + + cacheCtx, cacheCtxCancel := context.WithCancel(ctx) + + // We need to be able to stop the cache's shared informers, so wrap this in a stoppableCache. 
+ cache := &stoppableCache{ + Cache: remoteCache, + cancelFunc: cacheCtxCancel, + } + + for _, index := range t.indexes { + if err := cache.IndexField(ctx, index.Object, index.Field, index.ExtractValue); err != nil { + return nil, errors.Wrapf(err, "error creating cached client for remote cluster %q: error adding index for field %q to cache", cluster.String(), index.Field) + } } // Create the client for the remote cluster - c, err := client.New(config, client.Options{Scheme: t.scheme, Mapper: mapper}) + cachedClient, err := client.New(config, client.Options{ + Scheme: t.scheme, + Mapper: mapper, + HTTPClient: httpClient, + Cache: &client.CacheOptions{ + Reader: cache, + DisableFor: t.clientUncachedObjects, + Unstructured: true, + }, + }) if err != nil { - return nil, nil, errors.Wrapf(err, "error creating client for remote cluster %q", cluster.String()) + return nil, errors.Wrapf(err, "error creating cached client for remote cluster %q", cluster.String()) } - return c, mapper, nil + // Start the cache!!! + go cache.Start(cacheCtx) //nolint:errcheck + + // Wait until the cache is initially synced + cacheSyncCtx, cacheSyncCtxCancel := context.WithTimeout(ctx, initialCacheSyncTimeout) + defer cacheSyncCtxCancel() + if !cache.WaitForCacheSync(cacheSyncCtx) { + cache.Stop() + return nil, fmt.Errorf("failed waiting for cache for remote cluster %v to sync: %w", cluster, cacheCtx.Err()) + } + + // Wrap the cached client with a client that sets timeouts on all Get and List calls. + // If we don't set timeouts here, Get and List calls can get stuck if they lazily create a new informer + // and the informer then doesn't sync because the workload cluster apiserver is not reachable. + // An alternative would be to set timeouts in the contexts we pass into all Get and List calls. + // It should be reasonable to have Get and List calls time out within the duration configured in the restConfig. + cachedClient = newClientWithTimeout(cachedClient, config.Timeout) + + // Start cluster healthcheck!!! + go t.healthCheckCluster(cacheCtx, &healthCheckInput{ + cluster: cluster, + cfg: config, + httpClient: httpClient, + }) + + return &cachedClientOutput{ + Client: cachedClient, + Cache: cache, + }, nil } // deleteAccessor stops a clusterAccessor's cache and removes the clusterAccessor from the tracker. @@ -407,7 +536,7 @@ func (t *ClusterCacheTracker) deleteAccessor(_ context.Context, cluster client.O // Watcher is a scoped-down interface from Controller that only knows how to watch. type Watcher interface { // Watch watches src for changes, sending events to eventHandler if they pass predicates. - Watch(src source.Source, eventHandler handler.EventHandler, predicates ...predicate.Predicate) error + Watch(src source.Source) error } // WatchInput specifies the parameters used to establish a new watch for a remote cluster. @@ -437,7 +566,7 @@ func (t *ClusterCacheTracker) Watch(ctx context.Context, input WatchInput) error return errors.New("input.Name is required") } - accessor, err := t.getClusterAccessor(ctx, input.Cluster, t.indexes...)
+ accessor, err := t.getClusterAccessor(ctx, input.Cluster) if err != nil { return errors.Wrapf(err, "failed to add %s watch on cluster %s", input.Kind, klog.KRef(input.Cluster.Namespace, input.Cluster.Name)) } @@ -451,12 +580,12 @@ func (t *ClusterCacheTracker) Watch(ctx context.Context, input WatchInput) error if accessor.watches.Has(input.Name) { log := ctrl.LoggerFrom(ctx) - log.V(6).Info("Watch already exists", "Cluster", klog.KRef(input.Cluster.Namespace, input.Cluster.Name), "name", input.Name) + log.V(6).Info(fmt.Sprintf("Watch %s already exists", input.Name), "Cluster", klog.KRef(input.Cluster.Namespace, input.Cluster.Name)) return nil } // Need to create the watch - if err := input.Watcher.Watch(source.NewKindWithCache(input.Kind, accessor.cache), input.EventHandler, input.Predicates...); err != nil { + if err := input.Watcher.Watch(source.Kind(accessor.cache, input.Kind, input.EventHandler, input.Predicates...)); err != nil { return errors.Wrapf(err, "failed to add %s watch on cluster %s: failed to create watch", input.Kind, klog.KRef(input.Cluster.Namespace, input.Cluster.Name)) } @@ -468,6 +597,7 @@ func (t *ClusterCacheTracker) Watch(ctx context.Context, input WatchInput) error // healthCheckInput provides the input for the healthCheckCluster method. type healthCheckInput struct { cluster client.ObjectKey + httpClient *http.Client cfg *rest.Config interval time.Duration requestTimeout time.Duration @@ -505,9 +635,9 @@ func (t *ClusterCacheTracker) healthCheckCluster(ctx context.Context, in *health codec := runtime.NoopEncoder{Decoder: scheme.Codecs.UniversalDecoder()} cfg := rest.CopyConfig(in.cfg) cfg.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec}) - restClient, restClientErr := rest.UnversionedRESTClientFor(cfg) + restClient, restClientErr := rest.UnversionedRESTClientForConfigAndClient(cfg, in.httpClient) - runHealthCheckWithThreshold := func() (bool, error) { + runHealthCheckWithThreshold := func(ctx context.Context) (bool, error) { if restClientErr != nil { return false, restClientErr } @@ -563,13 +693,47 @@ func (t *ClusterCacheTracker) healthCheckCluster(ctx context.Context, in *health return false, nil } - err := wait.PollImmediateUntil(in.interval, runHealthCheckWithThreshold, ctx.Done()) - // An error returned implies the health check has failed a sufficient number of - // times for the cluster to be considered unhealthy - // NB. we are ignoring ErrWaitTimeout because this error happens when the channel is close, that in this case - // happens when the cache is explicitly stopped. - if err != nil && err != wait.ErrWaitTimeout { + err := wait.PollUntilContextCancel(ctx, in.interval, true, runHealthCheckWithThreshold) + // An error returned implies the health check has failed a sufficient number of times for the cluster + // to be considered unhealthy or the cache was stopped and thus the cache context canceled (we pass the + // cache context into wait.PollUntilContextCancel). + // NB. Log all errors that occurred even if this error might just be from a cancel of the cache context + // when the cache is stopped. Logging an error in this case is not a problem and makes debugging easier. + if err != nil { t.log.Error(err, "Error health checking cluster", "Cluster", klog.KRef(in.cluster.Namespace, in.cluster.Name)) - t.deleteAccessor(ctx, in.cluster) } + // Ensure in any case that the accessor is deleted (even if it is a no-op). + // NB. 
It is crucial to ensure the accessor was deleted, so it can be later recreated when the + // cluster is reachable again. + t.deleteAccessor(ctx, in.cluster) +} + +// newClientWithTimeout returns a new client which sets the specified timeout on all Get and List calls. +// If we don't set timeouts here, Get and List calls can get stuck if they lazily create a new informer +// and the informer then doesn't sync because the workload cluster apiserver is not reachable. +// An alternative would be to set timeouts in the contexts we pass into all Get and List calls. +func newClientWithTimeout(client client.Client, timeout time.Duration) client.Client { + return clientWithTimeout{ + Client: client, + timeout: timeout, + } +} + +type clientWithTimeout struct { + client.Client + timeout time.Duration +} + +var _ client.Client = &clientWithTimeout{} + +func (c clientWithTimeout) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + ctx, cancel := context.WithTimeout(ctx, c.timeout) + defer cancel() + return c.Client.Get(ctx, key, obj, opts...) +} + +func (c clientWithTimeout) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + ctx, cancel := context.WithTimeout(ctx, c.timeout) + defer cancel() + return c.Client.List(ctx, list, opts...) } diff --git a/controllers/remote/cluster_cache_tracker_fake.go b/controllers/remote/cluster_cache_tracker_fake.go index 908eee6375ad..6062b967a637 100644 --- a/controllers/remote/cluster_cache_tracker_fake.go +++ b/controllers/remote/cluster_cache_tracker_fake.go @@ -24,25 +24,18 @@ import ( ) // NewTestClusterCacheTracker creates a new fake ClusterCacheTracker that can be used by unit tests with fake client. -func NewTestClusterCacheTracker(log logr.Logger, cl client.Client, scheme *runtime.Scheme, objKey client.ObjectKey, watchObjects ...string) *ClusterCacheTracker { +func NewTestClusterCacheTracker(log logr.Logger, cl client.Client, remoteClient client.Client, scheme *runtime.Scheme, objKey client.ObjectKey, watchObjects ...string) *ClusterCacheTracker { testCacheTracker := &ClusterCacheTracker{ log: log, client: cl, scheme: scheme, clusterAccessors: make(map[client.ObjectKey]*clusterAccessor), - } - - delegatingClient, err := client.NewDelegatingClient(client.NewDelegatingClientInput{ - CacheReader: cl, - Client: cl, - }) - if err != nil { - panic(err) + clusterLock: newKeyedMutex(), } testCacheTracker.clusterAccessors[objKey] = &clusterAccessor{ cache: nil, - client: delegatingClient, + client: remoteClient, watches: sets.Set[string]{}.Insert(watchObjects...), } return testCacheTracker } diff --git a/controllers/remote/cluster_cache_tracker_test.go b/controllers/remote/cluster_cache_tracker_test.go index 52f25346f755..adbf30c45ebe 100644 --- a/controllers/remote/cluster_cache_tracker_test.go +++ b/controllers/remote/cluster_cache_tracker_test.go @@ -18,6 +18,7 @@ package remote import ( "context" + "fmt" "testing" "github.com/davecgh/go-spew/spew" @@ -31,6 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/reconcile" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -38,17 +40,21 @@ import ( "sigs.k8s.io/cluster-api/util/conditions" ) -func mapper(i client.Object) []reconcile.Request { +func mapper(_ context.Context, i client.Object) []reconcile.Request { return
[]reconcile.Request{ { NamespacedName: types.NamespacedName{ Namespace: i.GetNamespace(), - Name: "mapped-" + i.GetName(), + Name: getMappedName(i.GetName()), }, }, } } +func getMappedName(name string) string { + return fmt.Sprintf("mapped-%s", name) +} + func TestClusterCacheTracker(t *testing.T) { t.Run("watching", func(t *testing.T) { var ( @@ -68,16 +74,18 @@ func TestClusterCacheTracker(t *testing.T) { t.Log("Setting up a new manager") var err error mgr, err = manager.New(env.Config, manager.Options{ - Scheme: scheme.Scheme, - MetricsBindAddress: "0", + Scheme: scheme.Scheme, + Metrics: metricsserver.Options{ + BindAddress: "0", + }, }) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) c = &testController{ ch: make(chan string), } w, err = ctrl.NewControllerManagedBy(mgr).For(&clusterv1.MachineDeployment{}).Build(c) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) mgrContext, mgrCancel = context.WithCancel(ctx) t.Log("Starting the manager") @@ -90,13 +98,13 @@ func TestClusterCacheTracker(t *testing.T) { t.Log("Setting up a ClusterCacheTracker") cct, err = NewClusterCacheTracker(mgr, ClusterCacheTrackerOptions{ - Indexes: DefaultIndexes, + Indexes: []Index{NodeProviderIDIndex}, }) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) t.Log("Creating a namespace for the test") ns, err := env.CreateNamespace(ctx, "cluster-cache-tracker-test") - g.Expect(err).To(BeNil()) + g.Expect(err).ToNot(HaveOccurred()) t.Log("Creating a test cluster") clusterA = &clusterv1.Cluster{ @@ -124,7 +132,7 @@ func TestClusterCacheTracker(t *testing.T) { g.Expect(cleanupTestSecrets(ctx, k8sClient)).To(Succeed()) t.Log("Deleting any Clusters") g.Expect(cleanupTestClusters(ctx, k8sClient)).To(Succeed()) - g.Expect(<-c.ch).To(Equal("mapped-" + clusterA.Name)) + g.Expect(<-c.ch).To(Equal(getMappedName(clusterA.Name))) g.Consistently(c.ch).ShouldNot(Receive()) t.Log("Deleting Namespace") g.Expect(env.Delete(ctx, ns)).To(Succeed()) @@ -147,7 +155,7 @@ func TestClusterCacheTracker(t *testing.T) { })).To(Succeed()) t.Log("Waiting to receive the watch notification") - g.Expect(<-c.ch).To(Equal("mapped-" + clusterA.Name)) + g.Expect(<-c.ch).To(Equal(getMappedName(clusterA.Name))) t.Log("Ensuring no additional watch notifications arrive") g.Consistently(c.ch).ShouldNot(Receive()) @@ -159,7 +167,7 @@ func TestClusterCacheTracker(t *testing.T) { g.Expect(k8sClient.Update(ctx, clusterA)).To(Succeed()) t.Log("Waiting to receive the watch notification") - g.Expect(<-c.ch).To(Equal("mapped-" + clusterA.Name)) + g.Expect(<-c.ch).To(Equal(getMappedName(clusterA.Name))) t.Log("Ensuring no additional watch notifications arrive") g.Consistently(c.ch).ShouldNot(Receive()) @@ -181,7 +189,7 @@ func TestClusterCacheTracker(t *testing.T) { g.Expect(k8sClient.Update(ctx, clusterA)).To(Succeed()) t.Log("Waiting to receive the watch notification") - g.Expect(<-c.ch).To(Equal("mapped-" + clusterA.Name)) + g.Expect(<-c.ch).To(Equal(getMappedName(clusterA.Name))) t.Log("Ensuring no additional watch notifications arrive") g.Consistently(c.ch).ShouldNot(Receive()) diff --git a/controllers/remote/cluster_test.go b/controllers/remote/cluster_test.go index 0fe782b62ec6..6e42c50e05da 100644 --- a/controllers/remote/cluster_test.go +++ b/controllers/remote/cluster_test.go @@ -89,13 +89,18 @@ func TestNewClusterClient(t *testing.T) { gs := NewWithT(t) client := fake.NewClientBuilder().WithObjects(validSecret).Build() - _, err := NewClusterClient(ctx, "test-source", client, 
clusterWithValidKubeConfig) + c, err := NewClusterClient(ctx, "test-source", client, clusterWithValidKubeConfig) + gs.Expect(err).ToNot(HaveOccurred()) + // Since we do not have a remote server to connect to, we should expect to get // an error to that effect for the purpose of this test. + // Note: The error occurs here and not in `NewClusterClient` as with the lazy + // restmapper only the List call actually communicates with the server. + err = c.List(ctx, &corev1.NodeList{}) gs.Expect(err).To(MatchError(ContainSubstring("no such host"))) restConfig, err := RESTConfig(ctx, "test-source", client, clusterWithValidKubeConfig) - gs.Expect(err).NotTo(HaveOccurred()) + gs.Expect(err).ToNot(HaveOccurred()) gs.Expect(restConfig.Host).To(Equal("https://test-cluster-api.nodomain.example.com:6443")) gs.Expect(restConfig.UserAgent).To(MatchRegexp("remote.test/unknown test-source (.*) cluster.x-k8s.io/unknown")) gs.Expect(restConfig.Timeout).To(Equal(10 * time.Second)) diff --git a/controllers/remote/index.go b/controllers/remote/index.go index 275b09439735..16a7f5810240 100644 --- a/controllers/remote/index.go +++ b/controllers/remote/index.go @@ -30,11 +30,9 @@ type Index struct { ExtractValue client.IndexerFunc } -var nodeProviderIDIndex = Index{ +// NodeProviderIDIndex is used to index Nodes by ProviderID. +var NodeProviderIDIndex = Index{ Object: &corev1.Node{}, Field: index.NodeProviderIDField, ExtractValue: index.NodeByProviderID, } - -// DefaultIndexes is the default list of indexes on a ClusterCacheTracker. -var DefaultIndexes = []Index{nodeProviderIDIndex} diff --git a/controlplane/kubeadm/PROJECT b/controlplane/kubeadm/PROJECT deleted file mode 100644 index 57a40a5509d2..000000000000 --- a/controlplane/kubeadm/PROJECT +++ /dev/null @@ -1,19 +0,0 @@ -version: "2" -domain: controlplane.cluster.x-k8s.io -repo: sigs.k8s.io/cluster-api/controlplane/kubeadm -resources: -- group: controlplane - version: v1alpha3 - kind: KubeadmControlPlane -- group: controlplane - version: v1alpha4 - kind: KubeadmControlPlane -- group: controlplane - kind: KubeadmControlPlaneTemplate - version: v1alpha4 -- group: controlplane - version: v1beta1 - kind: KubeadmControlPlane -- group: controlplane - kind: KubeadmControlPlaneTemplate - version: v1beta1 diff --git a/controlplane/kubeadm/api/.import-restrictions b/controlplane/kubeadm/api/.import-restrictions new file mode 100644 index 000000000000..f6f10b3ff544 --- /dev/null +++ b/controlplane/kubeadm/api/.import-restrictions @@ -0,0 +1,5 @@ +rules: + - selectorRegexp: sigs[.]k8s[.]io/controller-runtime + allowedPrefixes: + - "sigs.k8s.io/controller-runtime/pkg/conversion" + forbiddenPrefixes: [] diff --git a/controlplane/kubeadm/api/v1alpha3/webhook_test.go b/controlplane/kubeadm/api/v1alpha3/webhook_test.go deleted file mode 100644 index ceee2b38ebe2..000000000000 --- a/controlplane/kubeadm/api/v1alpha3/webhook_test.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
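Editor's note on the controllers/remote/cluster_test.go hunk above: the assertion moves from NewClusterClient to a subsequent List because, with the lazy restmapper in newer controller-runtime releases, client construction no longer contacts the server. A minimal sketch of that pattern, assuming the ObjectKey-based NewClusterClient signature used by this branch; the function name and "example-source" string are illustrative, not part of the diff:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"sigs.k8s.io/cluster-api/controllers/remote"
)

// probeWorkloadCluster illustrates the behavior the updated test asserts:
// NewClusterClient returns without dialing the workload cluster, and the
// first List call is where an unreachable server surfaces as an error.
func probeWorkloadCluster(ctx context.Context, mgmt client.Client, key client.ObjectKey) error {
	c, err := remote.NewClusterClient(ctx, "example-source", mgmt, key)
	if err != nil {
		// Only kubeconfig retrieval/parsing failures are reported here.
		return err
	}
	// With the lazy restmapper, discovery and the network round-trip
	// happen on first use, not at construction time.
	return c.List(ctx, &corev1.NodeList{})
}

In practice this means reachability checks against workload clusters must issue a real request; a successfully constructed client proves only that the kubeconfig secret was readable.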
-*/ - -package v1alpha3 - -import ( - "fmt" - "testing" - - . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" - "sigs.k8s.io/controller-runtime/pkg/client" - - bootstrapv1alpha3 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha3" - "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/upstreamv1beta1" - "sigs.k8s.io/cluster-api/util" -) - -func TestKubeadmControlPlaneConversion(t *testing.T) { - g := NewWithT(t) - - ns, err := env.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5))) - g.Expect(err).ToNot(HaveOccurred()) - infraMachineTemplateName := fmt.Sprintf("test-machinetemplate-%s", util.RandomString(5)) - controlPlaneName := fmt.Sprintf("test-controlpane-%s", util.RandomString(5)) - controlPlane := &KubeadmControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Name: controlPlaneName, - Namespace: ns.Name, - }, - Spec: KubeadmControlPlaneSpec{ - Replicas: pointer.Int32(3), - Version: "v1.20.2", - InfrastructureTemplate: corev1.ObjectReference{ - APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha3", - Kind: "TestMachineTemplate", - Namespace: ns.Name, - Name: infraMachineTemplateName, - }, - KubeadmConfigSpec: bootstrapv1alpha3.KubeadmConfigSpec{ - ClusterConfiguration: &upstreamv1beta1.ClusterConfiguration{ - APIServer: upstreamv1beta1.APIServer{ - ControlPlaneComponent: upstreamv1beta1.ControlPlaneComponent{ - ExtraArgs: map[string]string{ - "foo": "bar", - }, - ExtraVolumes: []upstreamv1beta1.HostPathMount{ - { - Name: "mount-path", - HostPath: "/foo", - MountPath: "/foo", - ReadOnly: false, - }, - }, - }, - }, - }, - InitConfiguration: &upstreamv1beta1.InitConfiguration{ - NodeRegistration: upstreamv1beta1.NodeRegistrationOptions{ - Name: "foo", - CRISocket: "unix:///var/run/containerd/containerd.sock", - }, - }, - JoinConfiguration: &upstreamv1beta1.JoinConfiguration{ - NodeRegistration: upstreamv1beta1.NodeRegistrationOptions{ - Name: "foo", - CRISocket: "unix:///var/run/containerd/containerd.sock", - }, - }, - }, - }, - } - - g.Expect(env.Create(ctx, controlPlane)).To(Succeed()) - defer func(do ...client.Object) { - g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) - }(ns, controlPlane) -} diff --git a/controlplane/kubeadm/api/v1beta1/.import-restrictions b/controlplane/kubeadm/api/v1beta1/.import-restrictions new file mode 100644 index 000000000000..a2e1dfd08133 --- /dev/null +++ b/controlplane/kubeadm/api/v1beta1/.import-restrictions @@ -0,0 +1,5 @@ +rules: + - selectorRegexp: sigs[.]k8s[.]io/controller-runtime + allowedPrefixes: [] + forbiddenPrefixes: + - "sigs.k8s.io/controller-runtime" diff --git a/controlplane/kubeadm/api/v1beta1/groupversion_info.go b/controlplane/kubeadm/api/v1beta1/groupversion_info.go index c7a7c5fdf216..4c2dadc700f9 100644 --- a/controlplane/kubeadm/api/v1beta1/groupversion_info.go +++ b/controlplane/kubeadm/api/v1beta1/groupversion_info.go @@ -20,17 +20,26 @@ limitations under the License. package v1beta1 import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" ) var ( // GroupVersion is group version used to register these objects. GroupVersion = schema.GroupVersion{Group: "controlplane.cluster.x-k8s.io", Version: "v1beta1"} - // SchemeBuilder is used to add go types to the GroupVersionKind scheme. 
- SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + // schemeBuilder is used to add go types to the GroupVersionKind scheme. + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) // AddToScheme adds the types in this group-version to the given scheme. - AddToScheme = SchemeBuilder.AddToScheme + AddToScheme = schemeBuilder.AddToScheme + + objectTypes = []runtime.Object{} ) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, objectTypes...) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/controlplane/kubeadm/api/v1beta1/kubeadm_control_plane_types.go b/controlplane/kubeadm/api/v1beta1/kubeadm_control_plane_types.go index 272ffd12a4a1..1a9ccf073831 100644 --- a/controlplane/kubeadm/api/v1beta1/kubeadm_control_plane_types.go +++ b/controlplane/kubeadm/api/v1beta1/kubeadm_control_plane_types.go @@ -102,6 +102,9 @@ type KubeadmControlPlaneSpec struct { // RolloutAfter is a field to indicate a rollout should be performed // after the specified time even if no changes have been made to the // KubeadmControlPlane. + // Example: In the YAML the time can be specified in the RFC3339 format. + // To specify the rolloutAfter target as March 9, 2023, at 9 am UTC + // use "2023-03-09T09:00:00Z". // +optional RolloutAfter *metav1.Time `json:"rolloutAfter,omitempty"` @@ -190,7 +193,7 @@ type RemediationStrategy struct { // M1 become unhealthy; remediation happens, and M1-1 is created as a replacement. // If M1-1 (replacement of M1) has problems while bootstrapping it will become unhealthy, and then be // remediated; such operation is considered a retry, remediation-retry #1. - // If M1-2 (replacement of M1-2) becomes unhealthy, remediation-retry #2 will happen, etc. + // If M1-2 (replacement of M1-1) becomes unhealthy, remediation-retry #2 will happen, etc. // // A retry could happen only after RetryPeriod from the previous retry. // If a machine is marked as unhealthy after MinHealthyPeriod from the previous remediation expired, @@ -267,8 +270,11 @@ type KubeadmControlPlaneStatus struct { // +optional Initialized bool `json:"initialized"` - // Ready denotes that the KubeadmControlPlane API Server is ready to - // receive requests. + // Ready denotes that the KubeadmControlPlane API Server became ready during initial provisioning + // to receive requests. + // NOTE: this field is part of the Cluster API contract and it is used to orchestrate provisioning. + // The value of this field is never updated after provisioning is completed. Please use conditions + // to check the operational state of the control plane. 
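Editor's note on the RolloutAfter doc change above: the instant from the RFC3339 example can also be set programmatically. A minimal sketch, assuming only the KubeadmControlPlaneSpec.RolloutAfter field shown in this diff; the helper name is illustrative:

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
)

// setRolloutAfter pins spec.rolloutAfter to 2023-03-09T09:00:00Z, the instant
// used in the doc-comment example; metav1.Time marshals to RFC3339 in YAML/JSON.
func setRolloutAfter(kcp *controlplanev1.KubeadmControlPlane) {
	t := metav1.NewTime(time.Date(2023, time.March, 9, 9, 0, 0, 0, time.UTC))
	kcp.Spec.RolloutAfter = &t
}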
// +optional Ready bool `json:"ready"` @@ -356,5 +362,5 @@ type KubeadmControlPlaneList struct { } func init() { - SchemeBuilder.Register(&KubeadmControlPlane{}, &KubeadmControlPlaneList{}) + objectTypes = append(objectTypes, &KubeadmControlPlane{}, &KubeadmControlPlaneList{}) } diff --git a/controlplane/kubeadm/api/v1beta1/kubeadmcontrolplanetemplate_types.go b/controlplane/kubeadm/api/v1beta1/kubeadmcontrolplanetemplate_types.go index 69a1541d0397..478e30c7e786 100644 --- a/controlplane/kubeadm/api/v1beta1/kubeadmcontrolplanetemplate_types.go +++ b/controlplane/kubeadm/api/v1beta1/kubeadmcontrolplanetemplate_types.go @@ -51,7 +51,7 @@ type KubeadmControlPlaneTemplateList struct { } func init() { - SchemeBuilder.Register(&KubeadmControlPlaneTemplate{}, &KubeadmControlPlaneTemplateList{}) + objectTypes = append(objectTypes, &KubeadmControlPlaneTemplate{}, &KubeadmControlPlaneTemplateList{}) } // KubeadmControlPlaneTemplateResource describes the data needed to create a KubeadmControlPlane from a template. diff --git a/controlplane/kubeadm/api/v1beta1/kubeadmcontrolplanetemplate_webhook.go b/controlplane/kubeadm/api/v1beta1/kubeadmcontrolplanetemplate_webhook.go deleted file mode 100644 index f08e8e3a8134..000000000000 --- a/controlplane/kubeadm/api/v1beta1/kubeadmcontrolplanetemplate_webhook.go +++ /dev/null @@ -1,111 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "fmt" - "reflect" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/validation/field" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/webhook" - - bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" - "sigs.k8s.io/cluster-api/feature" -) - -const kubeadmControlPlaneTemplateImmutableMsg = "KubeadmControlPlaneTemplate spec.template.spec field is immutable. Please create new resource instead." - -func (r *KubeadmControlPlaneTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error { - return ctrl.NewWebhookManagedBy(mgr). - For(r). - Complete() -} - -// +kubebuilder:webhook:verbs=create;update,path=/mutate-controlplane-cluster-x-k8s-io-v1beta1-kubeadmcontrolplanetemplate,mutating=true,failurePolicy=fail,groups=controlplane.cluster.x-k8s.io,resources=kubeadmcontrolplanetemplates,versions=v1beta1,name=default.kubeadmcontrolplanetemplate.controlplane.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 - -var _ webhook.Defaulter = &KubeadmControlPlaneTemplate{} - -// Default implements webhook.Defaulter so a webhook will be registered for the type. 
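Editor's note on the groupversion_info.go and *_types.go init() hunks above: taken together they replace controller-runtime's scheme.Builder with a plain apimachinery SchemeBuilder, which is what allows the new .import-restrictions files to forbid sigs.k8s.io/controller-runtime imports under the API packages. Consolidated, the registration pattern reads as below; MyType/MyTypeList in the comment are hypothetical placeholders for the real kinds:

package v1beta1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

var (
	GroupVersion = schema.GroupVersion{Group: "controlplane.cluster.x-k8s.io", Version: "v1beta1"}

	// schemeBuilder wires up addKnownTypes; AddToScheme is the public entry point.
	schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
	AddToScheme   = schemeBuilder.AddToScheme

	// objectTypes is appended to by init() in each *_types.go file, e.g.:
	//   func init() { objectTypes = append(objectTypes, &MyType{}, &MyTypeList{}) }
	objectTypes = []runtime.Object{}
)

func addKnownTypes(scheme *runtime.Scheme) error {
	// Register all collected kinds under GroupVersion, plus the shared
	// meta/v1 kinds (ListOptions, Status, ...) for this group version.
	scheme.AddKnownTypes(GroupVersion, objectTypes...)
	metav1.AddToGroupVersion(scheme, GroupVersion)
	return nil
}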
-func (r *KubeadmControlPlaneTemplate) Default() { - bootstrapv1.DefaultKubeadmConfigSpec(&r.Spec.Template.Spec.KubeadmConfigSpec) - - r.Spec.Template.Spec.RolloutStrategy = defaultRolloutStrategy(r.Spec.Template.Spec.RolloutStrategy) -} - -// +kubebuilder:webhook:verbs=create;update,path=/validate-controlplane-cluster-x-k8s-io-v1beta1-kubeadmcontrolplanetemplate,mutating=false,failurePolicy=fail,groups=controlplane.cluster.x-k8s.io,resources=kubeadmcontrolplanetemplates,versions=v1beta1,name=validation.kubeadmcontrolplanetemplate.controlplane.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 - -var _ webhook.Validator = &KubeadmControlPlaneTemplate{} - -// ValidateCreate implements webhook.Validator so a webhook will be registered for the type. -func (r *KubeadmControlPlaneTemplate) ValidateCreate() error { - // NOTE: KubeadmControlPlaneTemplate is behind ClusterTopology feature gate flag; the web hook - // must prevent creating new objects in case the feature flag is disabled. - if !feature.Gates.Enabled(feature.ClusterTopology) { - return field.Forbidden( - field.NewPath("spec"), - "can be set only if the ClusterTopology feature flag is enabled", - ) - } - - spec := r.Spec.Template.Spec - allErrs := validateKubeadmControlPlaneTemplateResourceSpec(spec, field.NewPath("spec", "template", "spec")) - allErrs = append(allErrs, validateClusterConfiguration(spec.KubeadmConfigSpec.ClusterConfiguration, nil, field.NewPath("spec", "template", "spec", "kubeadmConfigSpec", "clusterConfiguration"))...) - allErrs = append(allErrs, spec.KubeadmConfigSpec.Validate(field.NewPath("spec", "template", "spec", "kubeadmConfigSpec"))...) - if len(allErrs) > 0 { - return apierrors.NewInvalid(GroupVersion.WithKind("KubeadmControlPlaneTemplate").GroupKind(), r.Name, allErrs) - } - return nil -} - -// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. -func (r *KubeadmControlPlaneTemplate) ValidateUpdate(oldRaw runtime.Object) error { - var allErrs field.ErrorList - old, ok := oldRaw.(*KubeadmControlPlaneTemplate) - if !ok { - return apierrors.NewBadRequest(fmt.Sprintf("expected a KubeadmControlPlaneTemplate but got a %T", oldRaw)) - } - - if !reflect.DeepEqual(r.Spec.Template.Spec, old.Spec.Template.Spec) { - allErrs = append(allErrs, - field.Invalid(field.NewPath("spec", "template", "spec"), r, kubeadmControlPlaneTemplateImmutableMsg), - ) - } - - if len(allErrs) == 0 { - return nil - } - return apierrors.NewInvalid(GroupVersion.WithKind("KubeadmControlPlaneTemplate").GroupKind(), r.Name, allErrs) -} - -// ValidateDelete implements webhook.Validator so a webhook will be registered for the type. -func (r *KubeadmControlPlaneTemplate) ValidateDelete() error { - return nil -} - -// validateKubeadmControlPlaneTemplateResourceSpec is a copy of validateKubeadmControlPlaneSpec which -// only validates the fields in KubeadmControlPlaneTemplateResourceSpec we care about. -func validateKubeadmControlPlaneTemplateResourceSpec(s KubeadmControlPlaneTemplateResourceSpec, pathPrefix *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - allErrs = append(allErrs, validateRolloutBefore(s.RolloutBefore, pathPrefix.Child("rolloutBefore"))...) - allErrs = append(allErrs, validateRolloutStrategy(s.RolloutStrategy, nil, pathPrefix.Child("rolloutStrategy"))...) 
- - return allErrs -} diff --git a/controlplane/kubeadm/api/v1beta1/kubeadmcontrolplanetemplate_webhook_test.go b/controlplane/kubeadm/api/v1beta1/kubeadmcontrolplanetemplate_webhook_test.go deleted file mode 100644 index 64ebebe6a0e0..000000000000 --- a/controlplane/kubeadm/api/v1beta1/kubeadmcontrolplanetemplate_webhook_test.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "testing" - "time" - - . "github.com/onsi/gomega" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilfeature "k8s.io/component-base/featuregate/testing" - - bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" - "sigs.k8s.io/cluster-api/feature" - utildefaulting "sigs.k8s.io/cluster-api/util/defaulting" -) - -func TestKubeadmControlPlaneTemplateDefault(t *testing.T) { - defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)() - - g := NewWithT(t) - - kcpTemplate := &KubeadmControlPlaneTemplate{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "foo", - }, - Spec: KubeadmControlPlaneTemplateSpec{ - Template: KubeadmControlPlaneTemplateResource{ - Spec: KubeadmControlPlaneTemplateResourceSpec{ - MachineTemplate: &KubeadmControlPlaneTemplateMachineTemplate{ - NodeDrainTimeout: &metav1.Duration{Duration: 10 * time.Second}, - }, - }, - }, - }, - } - updateDefaultingValidationKCPTemplate := kcpTemplate.DeepCopy() - updateDefaultingValidationKCPTemplate.Spec.Template.Spec.MachineTemplate.NodeDrainTimeout = &metav1.Duration{Duration: 20 * time.Second} - t.Run("for KubeadmControlPlaneTemplate", utildefaulting.DefaultValidateTest(updateDefaultingValidationKCPTemplate)) - kcpTemplate.Default() - - g.Expect(kcpTemplate.Spec.Template.Spec.KubeadmConfigSpec.Format).To(Equal(bootstrapv1.CloudConfig)) - g.Expect(kcpTemplate.Spec.Template.Spec.RolloutStrategy.Type).To(Equal(RollingUpdateStrategyType)) - g.Expect(kcpTemplate.Spec.Template.Spec.RolloutStrategy.RollingUpdate.MaxSurge.IntVal).To(Equal(int32(1))) -} - -func TestKubeadmControlPlaneTemplateValidationFeatureGateEnabled(t *testing.T) { - defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)() - - t.Run("create kubeadmcontrolplanetemplate should pass if gate enabled and valid kubeadmcontrolplanetemplate", func(t *testing.T) { - testnamespace := "test" - g := NewWithT(t) - kcpTemplate := &KubeadmControlPlaneTemplate{ - ObjectMeta: metav1.ObjectMeta{ - Name: "kubeadmcontrolplanetemplate-test", - Namespace: testnamespace, - }, - Spec: KubeadmControlPlaneTemplateSpec{ - Template: KubeadmControlPlaneTemplateResource{ - Spec: KubeadmControlPlaneTemplateResourceSpec{ - MachineTemplate: &KubeadmControlPlaneTemplateMachineTemplate{ - NodeDrainTimeout: &metav1.Duration{Duration: time.Second}, - }, - }, - }, - }, - } - g.Expect(kcpTemplate.ValidateCreate()).To(Succeed()) - }) -} - -func TestKubeadmControlPlaneTemplateValidationFeatureGateDisabled(t *testing.T) { - // NOTE: ClusterTopology feature flag is disabled 
by default, thus preventing to create KubeadmControlPlaneTemplate. - t.Run("create kubeadmcontrolplanetemplate should not pass if gate disabled and valid kubeadmcontrolplanetemplate", func(t *testing.T) { - testnamespace := "test" - g := NewWithT(t) - kcpTemplate := &KubeadmControlPlaneTemplate{ - ObjectMeta: metav1.ObjectMeta{ - Name: "kubeadmcontrolplanetemplate-test", - Namespace: testnamespace, - }, - Spec: KubeadmControlPlaneTemplateSpec{ - Template: KubeadmControlPlaneTemplateResource{ - Spec: KubeadmControlPlaneTemplateResourceSpec{ - MachineTemplate: &KubeadmControlPlaneTemplateMachineTemplate{ - NodeDrainTimeout: &metav1.Duration{Duration: time.Second}, - }, - }, - }, - }, - } - g.Expect(kcpTemplate.ValidateCreate()).NotTo(Succeed()) - }) -} diff --git a/controlplane/kubeadm/api/v1beta1/zz_generated.deepcopy.go b/controlplane/kubeadm/api/v1beta1/zz_generated.deepcopy.go index ae9633f16c89..fdef3a38dfbd 100644 --- a/controlplane/kubeadm/api/v1beta1/zz_generated.deepcopy.go +++ b/controlplane/kubeadm/api/v1beta1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* Copyright The Kubernetes Authors. diff --git a/controlplane/kubeadm/config/certmanager/certificate.yaml b/controlplane/kubeadm/config/certmanager/certificate.yaml index d53b0e8e97e7..83d93081eb48 100644 --- a/controlplane/kubeadm/config/certmanager/certificate.yaml +++ b/controlplane/kubeadm/config/certmanager/certificate.yaml @@ -15,14 +15,14 @@ metadata: name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml namespace: system spec: - # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize + # SERVICE_NAME and SERVICE_NAMESPACE will be substituted by kustomize dnsNames: - - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc - - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local + - SERVICE_NAME.SERVICE_NAMESPACE.svc + - SERVICE_NAME.SERVICE_NAMESPACE.svc.cluster.local issuerRef: kind: Issuer name: selfsigned-issuer - secretName: $(SERVICE_NAME)-cert # this secret will not be prefixed, since it's not managed by kustomize + secretName: capi-kubeadm-control-plane-webhook-service-cert # this secret will not be prefixed, since it's not managed by kustomize subject: organizations: - - k8s-sig-cluster-lifecycle \ No newline at end of file + - k8s-sig-cluster-lifecycle diff --git a/controlplane/kubeadm/config/certmanager/kustomizeconfig.yaml b/controlplane/kubeadm/config/certmanager/kustomizeconfig.yaml index 28a895a404a9..2b4342655949 100644 --- a/controlplane/kubeadm/config/certmanager/kustomizeconfig.yaml +++ b/controlplane/kubeadm/config/certmanager/kustomizeconfig.yaml @@ -7,13 +7,3 @@ nameReference: group: cert-manager.io path: spec/issuerRef/name -varReference: -- kind: Certificate - group: cert-manager.io - path: spec/commonName -- kind: Certificate - group: cert-manager.io - path: spec/dnsNames -- kind: Certificate - group: cert-manager.io - path: spec/secretName diff --git a/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml b/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml index 46f077a57b2b..1171314f06ed 100644 --- a/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml +++ b/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - 
controller-gen.kubebuilder.io/version: v0.11.3 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.15.0 name: kubeadmcontrolplanes.controlplane.cluster.x-k8s.io spec: group: controlplane.cluster.x-k8s.io @@ -51,21 +50,30 @@ spec: jsonPath: .status.unavailableReplicas name: Unavailable type: integer + deprecated: true name: v1alpha3 schema: openAPIV3Schema: - description: "KubeadmControlPlane is the Schema for the KubeadmControlPlane - API. \n Deprecated: This type will be removed in one of the next releases." + description: |- + KubeadmControlPlane is the Schema for the KubeadmControlPlane API. + + + Deprecated: This type will be removed in one of the next releases. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -73,46 +81,55 @@ spec: description: KubeadmControlPlaneSpec defines the desired state of KubeadmControlPlane. properties: infrastructureTemplate: - description: InfrastructureTemplate is a required reference to a custom - resource offered by an infrastructure provider. + description: |- + InfrastructureTemplate is a required reference to a custom resource + offered by an infrastructure provider. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead of - an entire object, this string should contain a valid JSON/Go - field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part of - an object. TODO: this design is not final and this field is - subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
+ For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic kubeadmConfigSpec: - description: KubeadmConfigSpec is a KubeadmConfigSpec to use for initializing - and joining machines to the control plane. + description: |- + KubeadmConfigSpec is a KubeadmConfigSpec + to use for initializing and joining machines to the control plane. properties: clusterConfiguration: description: ClusterConfiguration along with InitConfiguration @@ -131,21 +148,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags to pass - to the control plane component. TODO: This is temporary - and ideally we would like to switch all components to - use ComponentConfig + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements describing - volumes that are mounted from the host. + description: |- + HostPathMount contains elements describing volumes that are mounted from the + host. properties: hostPath: - description: HostPath is the path in the host that - will be mounted inside the pod. 
+ description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside the pod @@ -173,34 +192,34 @@ spec: type: string type: object apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string certificatesDir: - description: 'CertificatesDir specifies where to store or - look for all required certificates. NB: if not provided, - this will default to `/etc/kubernetes/pki`' + description: |- + CertificatesDir specifies where to store or look for all required certificates. + NB: if not provided, this will default to `/etc/kubernetes/pki` type: string clusterName: description: The cluster name type: string controlPlaneEndpoint: - description: 'ControlPlaneEndpoint sets a stable IP address - or DNS name for the control plane; it can be a valid IP - address or a RFC-1123 DNS subdomain, both with optional - TCP port. In case the ControlPlaneEndpoint is not specified, - the AdvertiseAddress + BindPort are used; in case the ControlPlaneEndpoint - is specified but without a TCP port, the BindPort is used. - Possible usages are: e.g. In a cluster with more than one - control plane instances, this field should be assigned the - address of the external load balancer in front of the control - plane instances. e.g. in environments with enforced node - recycling, the ControlPlaneEndpoint could be used for assigning - a stable DNS to the control plane. NB: This value defaults - to the first value in the Cluster object status.apiEndpoints - array.' + description: |- + ControlPlaneEndpoint sets a stable IP address or DNS name for the control plane; it + can be a valid IP address or a RFC-1123 DNS subdomain, both with optional TCP port. + In case the ControlPlaneEndpoint is not specified, the AdvertiseAddress + BindPort + are used; in case the ControlPlaneEndpoint is specified but without a TCP port, + the BindPort is used. + Possible usages are: + e.g. In a cluster with more than one control plane instances, this field should be + assigned the address of the external load balancer in front of the + control plane instances. + e.g. in environments with enforced node recycling, the ControlPlaneEndpoint + could be used for assigning a stable DNS to the control plane. + NB: This value defaults to the first value in the Cluster object status.apiEndpoints array. type: string controllerManager: description: ControllerManager contains extra settings for @@ -209,21 +228,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags to pass - to the control plane component. TODO: This is temporary - and ideally we would like to switch all components to - use ComponentConfig + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. 
type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements describing - volumes that are mounted from the host. + description: |- + HostPathMount contains elements describing volumes that are mounted from the + host. properties: hostPath: - description: HostPath is the path in the host that - will be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside the pod @@ -251,37 +272,38 @@ spec: in the cluster. properties: imageRepository: - description: ImageRepository sets the container registry - to pull images from. if not set, the ImageRepository - defined in ClusterConfiguration will be used instead. + description: |- + ImageRepository sets the container registry to pull images from. + if not set, the ImageRepository defined in ClusterConfiguration will be used instead. type: string imageTag: - description: ImageTag allows to specify a tag for the - image. In case this value is set, kubeadm does not change - automatically the version of the above components during - upgrades. + description: |- + ImageTag allows to specify a tag for the image. + In case this value is set, kubeadm does not change automatically the version of the above components during upgrades. type: string type: description: Type defines the DNS add-on to be used type: string type: object etcd: - description: 'Etcd holds configuration for etcd. NB: This - value defaults to a Local (stacked) etcd' + description: |- + Etcd holds configuration for etcd. + NB: This value defaults to a Local (stacked) etcd properties: external: - description: External describes how to connect to an external - etcd cluster Local and External are mutually exclusive + description: |- + External describes how to connect to an external etcd cluster + Local and External are mutually exclusive properties: caFile: - description: CAFile is an SSL Certificate Authority - file used to secure etcd communication. Required - if using a TLS connection. + description: |- + CAFile is an SSL Certificate Authority file used to secure etcd communication. + Required if using a TLS connection. type: string certFile: - description: CertFile is an SSL certification file - used to secure etcd communication. Required if using - a TLS connection. + description: |- + CertFile is an SSL certification file used to secure etcd communication. + Required if using a TLS connection. type: string endpoints: description: Endpoints of etcd members. Required for @@ -290,8 +312,9 @@ spec: type: string type: array keyFile: - description: KeyFile is an SSL key file used to secure - etcd communication. Required if using a TLS connection. + description: |- + KeyFile is an SSL key file used to secure etcd communication. + Required if using a TLS connection. type: string required: - caFile @@ -300,30 +323,31 @@ spec: - keyFile type: object local: - description: Local provides configuration knobs for configuring - the local etcd instance Local and External are mutually - exclusive + description: |- + Local provides configuration knobs for configuring the local etcd instance + Local and External are mutually exclusive properties: dataDir: - description: DataDir is the directory etcd will place - its data. Defaults to "/var/lib/etcd". + description: |- + DataDir is the directory etcd will place its data. + Defaults to "/var/lib/etcd". 
type: string extraArgs: additionalProperties: type: string - description: ExtraArgs are extra arguments provided - to the etcd binary when run inside a static pod. + description: |- + ExtraArgs are extra arguments provided to the etcd binary + when run inside a static pod. type: object imageRepository: - description: ImageRepository sets the container registry - to pull images from. if not set, the ImageRepository - defined in ClusterConfiguration will be used instead. + description: |- + ImageRepository sets the container registry to pull images from. + if not set, the ImageRepository defined in ClusterConfiguration will be used instead. type: string imageTag: - description: ImageTag allows to specify a tag for - the image. In case this value is set, kubeadm does - not change automatically the version of the above - components during upgrades. + description: |- + ImageTag allows to specify a tag for the image. + In case this value is set, kubeadm does not change automatically the version of the above components during upgrades. type: string peerCertSANs: description: PeerCertSANs sets extra Subject Alternative @@ -345,46 +369,45 @@ spec: description: FeatureGates enabled by the user. type: object imageRepository: - description: ImageRepository sets the container registry to - pull images from. If empty, `k8s.gcr.io` will be used by - default; in case of kubernetes version is a CI build (kubernetes - version starts with `ci/` or `ci-cross/`) `gcr.io/k8s-staging-ci-images` - will be used as a default for control plane components and - for kube-proxy, while `k8s.gcr.io` will be used for all - the other images. + description: |- + ImageRepository sets the container registry to pull images from. + If empty, `k8s.gcr.io` will be used by default; in case of kubernetes version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`) + `gcr.io/k8s-staging-ci-images` will be used as a default for control plane components and for kube-proxy, while `k8s.gcr.io` + will be used for all the other images. type: string kind: - description: 'Kind is a string value representing the REST - resource this object represents. Servers may infer this - from the endpoint the client submits requests to. Cannot - be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string kubernetesVersion: - description: 'KubernetesVersion is the target version of the - control plane. NB: This value defaults to the Machine object - spec.version' + description: |- + KubernetesVersion is the target version of the control plane. + NB: This value defaults to the Machine object spec.version type: string networking: - description: 'Networking holds configuration for the networking - topology of the cluster. NB: This value defaults to the - Cluster object spec.clusterNetwork.' + description: |- + Networking holds configuration for the networking topology of the cluster. + NB: This value defaults to the Cluster object spec.clusterNetwork. properties: dnsDomain: description: DNSDomain is the dns domain used by k8s services. Defaults to "cluster.local". type: string podSubnet: - description: PodSubnet is the subnet used by pods. 
If - unset, the API server will not allocate CIDR ranges - for every node. Defaults to a comma-delimited string - of the Cluster object's spec.clusterNetwork.services.cidrBlocks - if that is set + description: |- + PodSubnet is the subnet used by pods. + If unset, the API server will not allocate CIDR ranges for every node. + Defaults to a comma-delimited string of the Cluster object's spec.clusterNetwork.services.cidrBlocks if that is set type: string serviceSubnet: - description: ServiceSubnet is the subnet used by k8s services. - Defaults to a comma-delimited string of the Cluster - object's spec.clusterNetwork.pods.cidrBlocks, or to - "10.96.0.0/12" if that's unset. + description: |- + ServiceSubnet is the subnet used by k8s services. + Defaults to a comma-delimited string of the Cluster object's spec.clusterNetwork.pods.cidrBlocks, or + to "10.96.0.0/12" if that's unset. type: string type: object scheduler: @@ -394,21 +417,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags to pass - to the control plane component. TODO: This is temporary - and ideally we would like to switch all components to - use ComponentConfig + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements describing - volumes that are mounted from the host. + description: |- + HostPathMount contains elements describing volumes that are mounted from the + host. properties: hostPath: - description: HostPath is the path in the host that - will be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside the pod @@ -464,9 +489,9 @@ spec: be used. If set to None, no label is used. type: string overwrite: - description: Overwrite defines whether or not to overwrite - any existing filesystem. If true, any pre-existing - file system will be destroyed. Use with Caution. + description: |- + Overwrite defines whether or not to overwrite any existing filesystem. + If true, any pre-existing file system will be destroyed. Use with Caution. type: boolean partition: description: 'Partition specifies the partition to use. @@ -475,11 +500,9 @@ spec: number.' type: string replaceFS: - description: 'ReplaceFS is a special directive, used - for Microsoft Azure that instructs cloud-init to replace - a file system of . NOTE: unless you define - a label, this requires the use of the ''any'' partition - directive.' + description: |- + ReplaceFS is a special directive, used for Microsoft Azure that instructs cloud-init to replace a file system of . + NOTE: unless you define a label, this requires the use of the 'any' partition directive. type: string required: - device @@ -498,22 +521,21 @@ spec: description: Device is the name of the device. type: string layout: - description: Layout specifies the device layout. If - it is true, a single partition will be created for - the entire device. When layout is false, it means - don't partition or ignore existing partitioning. + description: |- + Layout specifies the device layout. + If it is true, a single partition will be created for the entire device. 
+ When layout is false, it means don't partition or ignore existing partitioning. type: boolean overwrite: - description: Overwrite describes whether to skip checks - and create the partition if a partition or filesystem - is found on the device. Use with caution. Default - is 'false'. + description: |- + Overwrite describes whether to skip checks and create the partition if a partition or filesystem is found on the device. + Use with caution. Default is 'false'. type: boolean tableType: - description: 'TableType specifies the tupe of partition - table. The following are supported: ''mbr'': default - and setups a MS-DOS partition table ''gpt'': setups - a GPT partition table' + description: |- + TableType specifies the tupe of partition table. The following are supported: + 'mbr': default and setups a MS-DOS partition table + 'gpt': setups a GPT partition table type: string required: - device @@ -589,51 +611,52 @@ spec: are the configurations necessary for the init command properties: apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string bootstrapTokens: - description: BootstrapTokens is respected at `kubeadm init` - time and describes a set of Bootstrap Tokens to create. - This information IS NOT uploaded to the kubeadm cluster - configmap, partly because of its sensitive nature + description: |- + BootstrapTokens is respected at `kubeadm init` time and describes a set of Bootstrap Tokens to create. + This information IS NOT uploaded to the kubeadm cluster configmap, partly because of its sensitive nature items: description: BootstrapToken describes one bootstrap token, stored as a Secret in the cluster. properties: description: - description: Description sets a human-friendly message - why this token exists and what it's used for, so other - administrators can know its purpose. + description: |- + Description sets a human-friendly message why this token exists and what it's used + for, so other administrators can know its purpose. type: string expires: - description: Expires specifies the timestamp when this - token expires. Defaults to being set dynamically at - runtime based on the TTL. Expires and TTL are mutually - exclusive. + description: |- + Expires specifies the timestamp when this token expires. Defaults to being set + dynamically at runtime based on the TTL. Expires and TTL are mutually exclusive. format: date-time type: string groups: - description: Groups specifies the extra groups that - this token will authenticate as when/if used for authentication + description: |- + Groups specifies the extra groups that this token will authenticate as when/if + used for authentication items: type: string type: array token: - description: Token is used for establishing bidirectional - trust between nodes and control-planes. Used for joining - nodes in the cluster. + description: |- + Token is used for establishing bidirectional trust between nodes and control-planes. 
+ Used for joining nodes in the cluster. type: string ttl: - description: TTL defines the time to live for this token. - Defaults to 24h. Expires and TTL are mutually exclusive. + description: |- + TTL defines the time to live for this token. Defaults to 24h. + Expires and TTL are mutually exclusive. type: string usages: - description: Usages describes the ways in which this - token can be used. Can by default be used for establishing - bidirectional trust, but that can be changed here. + description: |- + Usages describes the ways in which this token can be used. Can by default be used + for establishing bidirectional trust, but that can be changed here. items: type: string type: array @@ -642,31 +665,30 @@ spec: type: object type: array kind: - description: 'Kind is a string value representing the REST - resource this object represents. Servers may infer this - from the endpoint the client submits requests to. Cannot - be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string localAPIEndpoint: - description: LocalAPIEndpoint represents the endpoint of the - API server instance that's deployed on this control plane - node In HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint - in the sense that ControlPlaneEndpoint is the global endpoint - for the cluster, which then loadbalances the requests to - each individual API server. This configuration object lets - you customize what IP/DNS name and port the local API server - advertises it's accessible on. By default, kubeadm tries - to auto-detect the IP of the default interface and use that, - but in case that process fails you may set the desired value - here. + description: |- + LocalAPIEndpoint represents the endpoint of the API server instance that's deployed on this control plane node + In HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint in the sense that ControlPlaneEndpoint + is the global endpoint for the cluster, which then loadbalances the requests to each individual API server. This + configuration object lets you customize what IP/DNS name and port the local API server advertises it's accessible + on. By default, kubeadm tries to auto-detect the IP of the default interface and use that, but in case that process + fails you may set the desired value here. properties: advertiseAddress: description: AdvertiseAddress sets the IP address for the API server to advertise. type: string bindPort: - description: BindPort sets the secure port for the API - Server to bind to. Defaults to 6443. + description: |- + BindPort sets the secure port for the API Server to bind to. + Defaults to 6443. format: int32 type: integer required: @@ -674,11 +696,10 @@ spec: - bindPort type: object nodeRegistration: - description: NodeRegistration holds fields that relate to - registering the new control-plane node to the cluster. When - used in the context of control plane nodes, NodeRegistration - should remain consistent across both InitConfiguration and - JoinConfiguration + description: |- + NodeRegistration holds fields that relate to registering the new control-plane node to the cluster. 
+ When used in the context of control plane nodes, NodeRegistration should remain consistent + across both InitConfiguration and JoinConfiguration properties: criSocket: description: CRISocket is used to retrieve container runtime @@ -688,50 +709,41 @@ spec: kubeletExtraArgs: additionalProperties: type: string - description: KubeletExtraArgs passes through extra arguments - to the kubelet. The arguments here are passed to the - kubelet command line via the environment file kubeadm - writes at runtime for the kubelet to source. This overrides - the generic base-level configuration in the kubelet-config-1.X - ConfigMap Flags have higher priority when parsing. These - values are local and specific to the node kubeadm is - executing on. + description: |- + KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file + kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap + Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. type: object name: - description: Name is the `.Metadata.Name` field of the - Node API object that will be created in this `kubeadm - init` or `kubeadm join` operation. This field is also - used in the CommonName field of the kubelet's client - certificate to the API server. Defaults to the hostname - of the node if not provided. + description: |- + Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. + This field is also used in the CommonName field of the kubelet's client certificate to the API server. + Defaults to the hostname of the node if not provided. type: string taints: - description: 'Taints specifies the taints the Node API - object should be registered with. If this field is unset, - i.e. nil, in the `kubeadm init` process it will be defaulted - to []v1.Taint{''node-role.kubernetes.io/master=""''}. - If you don''t want to taint your control-plane node, - set this field to an empty slice, i.e. `taints: {}` - in the YAML file. This field is solely used for Node - registration.' + description: |- + Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process + it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your control-plane node, set this field to an + empty slice, i.e. `taints: {}` in the YAML file. This field is solely used for Node registration. items: - description: The node this Taint is attached to has - the "effect" on any pod that does not tolerate the - Taint. + description: |- + The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. properties: effect: - description: Required. The effect of the taint on - pods that do not tolerate the taint. Valid effects - are NoSchedule, PreferNoSchedule and NoExecute. + description: |- + Required. The effect of the taint on pods + that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. type: string key: description: Required. The taint key to be applied to a node. type: string timeAdded: - description: TimeAdded represents the time at which - the taint was added. It is only written for NoExecute - taints. + description: |- + TimeAdded represents the time at which the taint was added. 
+ It is only written for NoExecute taints. format: date-time type: string value: @@ -750,21 +762,23 @@ spec: the join command properties: apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string caCertPath: - description: 'CACertPath is the path to the SSL certificate - authority used to secure comunications between node and - control-plane. Defaults to "/etc/kubernetes/pki/ca.crt". - TODO: revisit when there is defaulting from k/k' + description: |- + CACertPath is the path to the SSL certificate authority used to + secure comunications between node and control-plane. + Defaults to "/etc/kubernetes/pki/ca.crt". + TODO: revisit when there is defaulting from k/k type: string controlPlane: - description: ControlPlane defines the additional control plane - instance to be deployed on the joining node. If nil, no - additional control plane instance will be deployed. + description: |- + ControlPlane defines the additional control plane instance to be deployed on the joining node. + If nil, no additional control plane instance will be deployed. properties: localAPIEndpoint: description: LocalAPIEndpoint represents the endpoint @@ -775,8 +789,9 @@ spec: for the API server to advertise. type: string bindPort: - description: BindPort sets the secure port for the - API Server to bind to. Defaults to 6443. + description: |- + BindPort sets the secure port for the API Server to bind to. + Defaults to 6443. format: int32 type: integer required: @@ -785,52 +800,50 @@ spec: type: object type: object discovery: - description: 'Discovery specifies the options for the kubelet - to use during the TLS Bootstrap process TODO: revisit when - there is defaulting from k/k' + description: |- + Discovery specifies the options for the kubelet to use during the TLS Bootstrap process + TODO: revisit when there is defaulting from k/k properties: bootstrapToken: - description: BootstrapToken is used to set the options - for bootstrap token based discovery BootstrapToken and - File are mutually exclusive + description: |- + BootstrapToken is used to set the options for bootstrap token based discovery + BootstrapToken and File are mutually exclusive properties: apiServerEndpoint: description: APIServerEndpoint is an IP or domain name to the API server from which info will be fetched. type: string caCertHashes: - description: 'CACertHashes specifies a set of public - key pins to verify when token-based discovery is - used. The root CA found during discovery must match - one of these values. Specifying an empty set disables - root CA pinning, which can be unsafe. Each hash - is specified as ":", where the only - currently supported type is "sha256". This is a - hex-encoded SHA-256 hash of the Subject Public Key - Info (SPKI) object in DER-encoded ASN.1. 
These hashes - can be calculated using, for example, OpenSSL: openssl - x509 -pubkey -in ca.crt openssl rsa -pubin -outform - der 2>&/dev/null | openssl dgst -sha256 -hex' + description: |- + CACertHashes specifies a set of public key pins to verify + when token-based discovery is used. The root CA found during discovery + must match one of these values. Specifying an empty set disables root CA + pinning, which can be unsafe. Each hash is specified as ":", + where the only currently supported type is "sha256". This is a hex-encoded + SHA-256 hash of the Subject Public Key Info (SPKI) object in DER-encoded + ASN.1. These hashes can be calculated using, for example, OpenSSL: + openssl x509 -pubkey -in ca.crt openssl rsa -pubin -outform der 2>&/dev/null | openssl dgst -sha256 -hex items: type: string type: array token: - description: Token is a token used to validate cluster - information fetched from the control-plane. + description: |- + Token is a token used to validate cluster information + fetched from the control-plane. type: string unsafeSkipCAVerification: - description: UnsafeSkipCAVerification allows token-based - discovery without CA verification via CACertHashes. - This can weaken the security of kubeadm since other - nodes can impersonate the control-plane. + description: |- + UnsafeSkipCAVerification allows token-based discovery + without CA verification via CACertHashes. This can weaken + the security of kubeadm since other nodes can impersonate the control-plane. type: boolean required: - token - unsafeSkipCAVerification type: object file: - description: File is used to specify a file or URL to - a kubeconfig file from which to load cluster information + description: |- + File is used to specify a file or URL to a kubeconfig file from which to load cluster information BootstrapToken and File are mutually exclusive properties: kubeConfigPath: @@ -845,27 +858,26 @@ spec: description: Timeout modifies the discovery timeout type: string tlsBootstrapToken: - description: 'TLSBootstrapToken is a token used for TLS - bootstrapping. If .BootstrapToken is set, this field - is defaulted to .BootstrapToken.Token, but can be overridden. - If .File is set, this field **must be set** in case - the KubeConfigFile does not contain any other authentication - information TODO: revisit when there is defaulting from - k/k' + description: |- + TLSBootstrapToken is a token used for TLS bootstrapping. + If .BootstrapToken is set, this field is defaulted to .BootstrapToken.Token, but can be overridden. + If .File is set, this field **must be set** in case the KubeConfigFile does not contain any other authentication information + TODO: revisit when there is defaulting from k/k type: string type: object kind: - description: 'Kind is a string value representing the REST - resource this object represents. Servers may infer this - from the endpoint the client submits requests to. Cannot - be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string nodeRegistration: - description: NodeRegistration holds fields that relate to - registering the new control-plane node to the cluster. 
When - used in the context of control plane nodes, NodeRegistration - should remain consistent across both InitConfiguration and - JoinConfiguration + description: |- + NodeRegistration holds fields that relate to registering the new control-plane node to the cluster. + When used in the context of control plane nodes, NodeRegistration should remain consistent + across both InitConfiguration and JoinConfiguration properties: criSocket: description: CRISocket is used to retrieve container runtime @@ -875,50 +887,41 @@ spec: kubeletExtraArgs: additionalProperties: type: string - description: KubeletExtraArgs passes through extra arguments - to the kubelet. The arguments here are passed to the - kubelet command line via the environment file kubeadm - writes at runtime for the kubelet to source. This overrides - the generic base-level configuration in the kubelet-config-1.X - ConfigMap Flags have higher priority when parsing. These - values are local and specific to the node kubeadm is - executing on. + description: |- + KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file + kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap + Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. type: object name: - description: Name is the `.Metadata.Name` field of the - Node API object that will be created in this `kubeadm - init` or `kubeadm join` operation. This field is also - used in the CommonName field of the kubelet's client - certificate to the API server. Defaults to the hostname - of the node if not provided. + description: |- + Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. + This field is also used in the CommonName field of the kubelet's client certificate to the API server. + Defaults to the hostname of the node if not provided. type: string taints: - description: 'Taints specifies the taints the Node API - object should be registered with. If this field is unset, - i.e. nil, in the `kubeadm init` process it will be defaulted - to []v1.Taint{''node-role.kubernetes.io/master=""''}. - If you don''t want to taint your control-plane node, - set this field to an empty slice, i.e. `taints: {}` - in the YAML file. This field is solely used for Node - registration.' + description: |- + Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process + it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your control-plane node, set this field to an + empty slice, i.e. `taints: {}` in the YAML file. This field is solely used for Node registration. items: - description: The node this Taint is attached to has - the "effect" on any pod that does not tolerate the - Taint. + description: |- + The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. properties: effect: - description: Required. The effect of the taint on - pods that do not tolerate the taint. Valid effects - are NoSchedule, PreferNoSchedule and NoExecute. + description: |- + Required. The effect of the taint on pods + that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. type: string key: description: Required. 
The taint key to be applied to a node. type: string timeAdded: - description: TimeAdded represents the time at which - the taint was added. It is only written for NoExecute - taints. + description: |- + TimeAdded represents the time at which the taint was added. + It is only written for NoExecute taints. format: date-time type: string value: @@ -966,13 +969,20 @@ spec: type: string type: array useExperimentalRetryJoin: - description: "UseExperimentalRetryJoin replaces a basic kubeadm - command with a shell script with retries for joins. \n This - is meant to be an experimental temporary workaround on some - environments where joins fail due to timing (and other issues). - The long term goal is to add retries to kubeadm proper and use - that functionality. \n This will add about 40KB to userdata - \n For more information, refer to https://github.com/kubernetes-sigs/cluster-api/pull/2763#discussion_r397306055." + description: |- + UseExperimentalRetryJoin replaces a basic kubeadm command with a shell + script with retries for joins. + + + This is meant to be an experimental temporary workaround on some environments + where joins fail due to timing (and other issues). The long term goal is to add retries to + kubeadm proper and use that functionality. + + + This will add about 40KB to userdata + + + For more information, refer to https://github.com/kubernetes-sigs/cluster-api/pull/2763#discussion_r397306055. type: boolean users: description: Users specifies extra users to add @@ -1027,52 +1037,60 @@ spec: type: object type: array verbosity: - description: Verbosity is the number for the kubeadm log level - verbosity. It overrides the `--v` flag in kubeadm commands. + description: |- + Verbosity is the number for the kubeadm log level verbosity. + It overrides the `--v` flag in kubeadm commands. format: int32 type: integer type: object nodeDrainTimeout: - description: 'NodeDrainTimeout is the total amount of time that the - controller will spend on draining a controlplane node The default - value is 0, meaning that the node can be drained without any time - limitations. NOTE: NodeDrainTimeout is different from `kubectl drain - --timeout`' + description: |- + NodeDrainTimeout is the total amount of time that the controller will spend on draining a controlplane node + The default value is 0, meaning that the node can be drained without any time limitations. + NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` type: string replicas: - description: Number of desired machines. Defaults to 1. When stacked - etcd is used only odd numbers are permitted, as per [etcd best practice](https://etcd.io/docs/v3.3.12/faq/#why-an-odd-number-of-cluster-members). + description: |- + Number of desired machines. Defaults to 1. When stacked etcd is used only + odd numbers are permitted, as per [etcd best practice](https://etcd.io/docs/v3.3.12/faq/#why-an-odd-number-of-cluster-members). This is a pointer to distinguish between explicit zero and not specified. format: int32 type: integer rolloutStrategy: - description: The RolloutStrategy to use to replace control plane machines - with new ones. + description: |- + The RolloutStrategy to use to replace control plane machines with + new ones. properties: rollingUpdate: - description: Rolling update config params. Present only if RolloutStrategyType - = RollingUpdate. + description: |- + Rolling update config params. Present only if + RolloutStrategyType = RollingUpdate. 
properties: maxSurge: anyOf: - type: integer - type: string - description: 'The maximum number of control planes that can - be scheduled above or under the desired number of control - planes. Value can be an absolute number 1 or 0. Defaults - to 1. Example: when this is set to 1, the control plane - can be scaled up immediately when the rolling update starts.' + description: |- + The maximum number of control planes that can be scheduled above or under the + desired number of control planes. + Value can be an absolute number 1 or 0. + Defaults to 1. + Example: when this is set to 1, the control plane can be scaled + up immediately when the rolling update starts. x-kubernetes-int-or-string: true type: object type: - description: Type of rollout. Currently the only supported strategy - is "RollingUpdate". Default is RollingUpdate. + description: |- + Type of rollout. Currently the only supported strategy is + "RollingUpdate". + Default is RollingUpdate. type: string type: object upgradeAfter: - description: UpgradeAfter is a field to indicate an upgrade should - be performed after the specified time even if no changes have been - made to the KubeadmControlPlane + description: |- + UpgradeAfter is a field to indicate an upgrade should be performed + after the specified time even if no changes have been made to the + KubeadmControlPlane format: date-time type: string version: @@ -1093,37 +1111,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. 
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
type: string
required:
- status
@@ -1131,17 +1149,20 @@ spec:
type: object
type: array
failureMessage:
- description: ErrorMessage indicates that there is a terminal problem
- reconciling the state, and will be set to a descriptive error message.
+ description: |-
+ ErrorMessage indicates that there is a terminal problem reconciling the
+ state, and will be set to a descriptive error message.
type: string
failureReason:
- description: FailureReason indicates that there is a terminal problem
- reconciling the state, and will be set to a token value suitable
- for programmatic interpretation.
+ description: |-
+ FailureReason indicates that there is a terminal problem reconciling the
+ state, and will be set to a token value suitable for
+ programmatic interpretation.
type: string
initialized:
- description: Initialized denotes whether or not the control plane
- has the uploaded kubeadm-config configmap.
+ description: |-
+ Initialized denotes whether or not the control plane has the
+ uploaded kubeadm-config configmap.
type: boolean
observedGeneration:
description: ObservedGeneration is the latest generation observed
@@ -1149,8 +1170,9 @@ spec:
format: int64
type: integer
ready:
- description: Ready denotes that the KubeadmControlPlane API Server
- is ready to receive requests.
+ description: |-
+ Ready denotes that the KubeadmControlPlane API Server is ready to
+ receive requests.
type: boolean
readyReplicas:
description: Total number of fully running and ready control plane
@@ -1158,33 +1180,37 @@ spec:
format: int32
type: integer
replicas:
- description: Total number of non-terminated machines targeted by this
- control plane (their labels match the selector).
+ description: |-
+ Total number of non-terminated machines targeted by this control plane
+ (their labels match the selector).
format: int32
type: integer
selector:
- description: 'Selector is the label selector in string format to avoid
- introspection by clients, and is used to provide the CRD-based integration
- for the scale subresource and additional integrations for things
- like kubectl describe.. The string will be in the same format as
- the query-param syntax. More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors'
+ description: |-
+ Selector is the label selector in string format to avoid introspection
+ by clients, and is used to provide the CRD-based integration for the
+ scale subresource and additional integrations for things like kubectl
+ describe. The string will be in the same format as the query-param syntax.
+ More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors
type: string
unavailableReplicas:
- description: Total number of unavailable machines targeted by this
- control plane. This is the total number of machines that are still
- required for the deployment to have 100% available capacity. They
- may either be machines that are running but not yet ready or machines
+ description: |-
+ Total number of unavailable machines targeted by this control plane.
+ This is the total number of machines that are still required for
+ the deployment to have 100% available capacity.
They may either + be machines that are running but not yet ready or machines that still have not been created. format: int32 type: integer updatedReplicas: - description: Total number of non-terminated machines targeted by this - control plane that have the desired template spec. + description: |- + Total number of non-terminated machines targeted by this control plane + that have the desired template spec. format: int32 type: integer type: object type: object - served: true + served: false storage: false subresources: scale: @@ -1228,21 +1254,30 @@ spec: jsonPath: .status.unavailableReplicas name: Unavailable type: integer + deprecated: true name: v1alpha4 schema: openAPIV3Schema: - description: "KubeadmControlPlane is the Schema for the KubeadmControlPlane - API. \n Deprecated: This type will be removed in one of the next releases." + description: |- + KubeadmControlPlane is the Schema for the KubeadmControlPlane API. + + + Deprecated: This type will be removed in one of the next releases. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -1250,8 +1285,9 @@ spec: description: KubeadmControlPlaneSpec defines the desired state of KubeadmControlPlane. properties: kubeadmConfigSpec: - description: KubeadmConfigSpec is a KubeadmConfigSpec to use for initializing - and joining machines to the control plane. + description: |- + KubeadmConfigSpec is a KubeadmConfigSpec + to use for initializing and joining machines to the control plane. properties: clusterConfiguration: description: ClusterConfiguration along with InitConfiguration @@ -1270,21 +1306,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags to pass - to the control plane component. TODO: This is temporary - and ideally we would like to switch all components to - use ComponentConfig + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. 
items: - description: HostPathMount contains elements describing - volumes that are mounted from the host. + description: |- + HostPathMount contains elements describing volumes that are mounted from the + host. properties: hostPath: - description: HostPath is the path in the host that - will be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside the pod @@ -1312,34 +1350,34 @@ spec: type: string type: object apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string certificatesDir: - description: 'CertificatesDir specifies where to store or - look for all required certificates. NB: if not provided, - this will default to `/etc/kubernetes/pki`' + description: |- + CertificatesDir specifies where to store or look for all required certificates. + NB: if not provided, this will default to `/etc/kubernetes/pki` type: string clusterName: description: The cluster name type: string controlPlaneEndpoint: - description: 'ControlPlaneEndpoint sets a stable IP address - or DNS name for the control plane; it can be a valid IP - address or a RFC-1123 DNS subdomain, both with optional - TCP port. In case the ControlPlaneEndpoint is not specified, - the AdvertiseAddress + BindPort are used; in case the ControlPlaneEndpoint - is specified but without a TCP port, the BindPort is used. - Possible usages are: e.g. In a cluster with more than one - control plane instances, this field should be assigned the - address of the external load balancer in front of the control - plane instances. e.g. in environments with enforced node - recycling, the ControlPlaneEndpoint could be used for assigning - a stable DNS to the control plane. NB: This value defaults - to the first value in the Cluster object status.apiEndpoints - array.' + description: |- + ControlPlaneEndpoint sets a stable IP address or DNS name for the control plane; it + can be a valid IP address or a RFC-1123 DNS subdomain, both with optional TCP port. + In case the ControlPlaneEndpoint is not specified, the AdvertiseAddress + BindPort + are used; in case the ControlPlaneEndpoint is specified but without a TCP port, + the BindPort is used. + Possible usages are: + e.g. In a cluster with more than one control plane instances, this field should be + assigned the address of the external load balancer in front of the + control plane instances. + e.g. in environments with enforced node recycling, the ControlPlaneEndpoint + could be used for assigning a stable DNS to the control plane. + NB: This value defaults to the first value in the Cluster object status.apiEndpoints array. type: string controllerManager: description: ControllerManager contains extra settings for @@ -1348,21 +1386,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags to pass - to the control plane component. 
TODO: This is temporary - and ideally we would like to switch all components to - use ComponentConfig + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements describing - volumes that are mounted from the host. + description: |- + HostPathMount contains elements describing volumes that are mounted from the + host. properties: hostPath: - description: HostPath is the path in the host that - will be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside the pod @@ -1390,34 +1430,35 @@ spec: in the cluster. properties: imageRepository: - description: ImageRepository sets the container registry - to pull images from. if not set, the ImageRepository - defined in ClusterConfiguration will be used instead. + description: |- + ImageRepository sets the container registry to pull images from. + if not set, the ImageRepository defined in ClusterConfiguration will be used instead. type: string imageTag: - description: ImageTag allows to specify a tag for the - image. In case this value is set, kubeadm does not change - automatically the version of the above components during - upgrades. + description: |- + ImageTag allows to specify a tag for the image. + In case this value is set, kubeadm does not change automatically the version of the above components during upgrades. type: string type: object etcd: - description: 'Etcd holds configuration for etcd. NB: This - value defaults to a Local (stacked) etcd' + description: |- + Etcd holds configuration for etcd. + NB: This value defaults to a Local (stacked) etcd properties: external: - description: External describes how to connect to an external - etcd cluster Local and External are mutually exclusive + description: |- + External describes how to connect to an external etcd cluster + Local and External are mutually exclusive properties: caFile: - description: CAFile is an SSL Certificate Authority - file used to secure etcd communication. Required - if using a TLS connection. + description: |- + CAFile is an SSL Certificate Authority file used to secure etcd communication. + Required if using a TLS connection. type: string certFile: - description: CertFile is an SSL certification file - used to secure etcd communication. Required if using - a TLS connection. + description: |- + CertFile is an SSL certification file used to secure etcd communication. + Required if using a TLS connection. type: string endpoints: description: Endpoints of etcd members. Required for @@ -1426,8 +1467,9 @@ spec: type: string type: array keyFile: - description: KeyFile is an SSL key file used to secure - etcd communication. Required if using a TLS connection. + description: |- + KeyFile is an SSL key file used to secure etcd communication. + Required if using a TLS connection. 
type: string required: - caFile @@ -1436,30 +1478,31 @@ spec: - keyFile type: object local: - description: Local provides configuration knobs for configuring - the local etcd instance Local and External are mutually - exclusive + description: |- + Local provides configuration knobs for configuring the local etcd instance + Local and External are mutually exclusive properties: dataDir: - description: DataDir is the directory etcd will place - its data. Defaults to "/var/lib/etcd". + description: |- + DataDir is the directory etcd will place its data. + Defaults to "/var/lib/etcd". type: string extraArgs: additionalProperties: type: string - description: ExtraArgs are extra arguments provided - to the etcd binary when run inside a static pod. + description: |- + ExtraArgs are extra arguments provided to the etcd binary + when run inside a static pod. type: object imageRepository: - description: ImageRepository sets the container registry - to pull images from. if not set, the ImageRepository - defined in ClusterConfiguration will be used instead. + description: |- + ImageRepository sets the container registry to pull images from. + if not set, the ImageRepository defined in ClusterConfiguration will be used instead. type: string imageTag: - description: ImageTag allows to specify a tag for - the image. In case this value is set, kubeadm does - not change automatically the version of the above - components during upgrades. + description: |- + ImageTag allows to specify a tag for the image. + In case this value is set, kubeadm does not change automatically the version of the above components during upgrades. type: string peerCertSANs: description: PeerCertSANs sets extra Subject Alternative @@ -1481,46 +1524,45 @@ spec: description: FeatureGates enabled by the user. type: object imageRepository: - description: ImageRepository sets the container registry to - pull images from. If empty, `registry.k8s.io` will be used - by default; in case of kubernetes version is a CI build - (kubernetes version starts with `ci/` or `ci-cross/`) `gcr.io/k8s-staging-ci-images` - will be used as a default for control plane components and - for kube-proxy, while `registry.k8s.io` will be used for - all the other images. + description: |- + ImageRepository sets the container registry to pull images from. + If empty, `registry.k8s.io` will be used by default; in case of kubernetes version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`) + `gcr.io/k8s-staging-ci-images` will be used as a default for control plane components and for kube-proxy, while `registry.k8s.io` + will be used for all the other images. type: string kind: - description: 'Kind is a string value representing the REST - resource this object represents. Servers may infer this - from the endpoint the client submits requests to. Cannot - be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string kubernetesVersion: - description: 'KubernetesVersion is the target version of the - control plane. NB: This value defaults to the Machine object - spec.version' + description: |- + KubernetesVersion is the target version of the control plane. 
+ NB: This value defaults to the Machine object spec.version
type: string
networking:
- description: 'Networking holds configuration for the networking
- topology of the cluster. NB: This value defaults to the
- Cluster object spec.clusterNetwork.'
+ description: |-
+ Networking holds configuration for the networking topology of the cluster.
+ NB: This value defaults to the Cluster object spec.clusterNetwork.
properties:
dnsDomain:
description: DNSDomain is the dns domain used by k8s services.
Defaults to "cluster.local".
type: string
podSubnet:
- description: PodSubnet is the subnet used by pods. If
- unset, the API server will not allocate CIDR ranges
- for every node. Defaults to a comma-delimited string
- of the Cluster object's spec.clusterNetwork.services.cidrBlocks
- if that is set
+ description: |-
+ PodSubnet is the subnet used by pods.
+ If unset, the API server will not allocate CIDR ranges for every node.
+ Defaults to a comma-delimited string of the Cluster object's spec.clusterNetwork.pods.cidrBlocks if that is set
type: string
serviceSubnet:
- description: ServiceSubnet is the subnet used by k8s services.
- Defaults to a comma-delimited string of the Cluster
- object's spec.clusterNetwork.pods.cidrBlocks, or to
- "10.96.0.0/12" if that's unset.
+ description: |-
+ ServiceSubnet is the subnet used by k8s services.
+ Defaults to a comma-delimited string of the Cluster object's spec.clusterNetwork.services.cidrBlocks, or
+ to "10.96.0.0/12" if that's unset.
type: string
type: object
scheduler:
@@ -1530,21 +1572,23 @@ spec:
extraArgs:
additionalProperties:
type: string
- description: 'ExtraArgs is an extra set of flags to pass
- to the control plane component. TODO: This is temporary
- and ideally we would like to switch all components to
- use ComponentConfig + ConfigMaps.'
+ description: |-
+ ExtraArgs is an extra set of flags to pass to the control plane component.
+ TODO: This is temporary and ideally we would like to switch all components to
+ use ComponentConfig + ConfigMaps.
type: object
extraVolumes:
description: ExtraVolumes is an extra set of host volumes,
mounted to the control plane component.
items:
- description: HostPathMount contains elements describing
- volumes that are mounted from the host.
+ description: |-
+ HostPathMount contains elements describing volumes that are mounted from the
+ host.
properties:
hostPath:
- description: HostPath is the path in the host that
- will be mounted inside the pod.
+ description: |-
+ HostPath is the path in the host that will be mounted inside
+ the pod.
type: string
mountPath:
description: MountPath is the path inside the pod
@@ -1595,9 +1639,9 @@ spec:
be used. If set to None, no label is used.
type: string
overwrite:
- description: Overwrite defines whether or not to overwrite
- any existing filesystem. If true, any pre-existing
- file system will be destroyed. Use with Caution.
+ description: |-
+ Overwrite defines whether or not to overwrite any existing filesystem.
+ If true, any pre-existing file system will be destroyed. Use with Caution.
type: boolean
partition:
description: 'Partition specifies the partition to use.
@@ -1606,11 +1650,9 @@ spec:
number.'
type: string
replaceFS:
- description: 'ReplaceFS is a special directive, used
- for Microsoft Azure that instructs cloud-init to replace
- a file system of . NOTE: unless you define
- a label, this requires the use of the ''any'' partition
- directive.'
+ description: |-
+ ReplaceFS is a special directive, used for Microsoft Azure that instructs cloud-init to replace a file system of .
+ NOTE: unless you define a label, this requires the use of the 'any' partition directive.
type: string
required:
- device
@@ -1629,22 +1671,21 @@ spec:
description: Device is the name of the device.
type: string
layout:
- description: Layout specifies the device layout. If
- it is true, a single partition will be created for
- the entire device. When layout is false, it means
- don't partition or ignore existing partitioning.
+ description: |-
+ Layout specifies the device layout.
+ If it is true, a single partition will be created for the entire device.
+ When layout is false, it means don't partition or ignore existing partitioning.
type: boolean
overwrite:
- description: Overwrite describes whether to skip checks
- and create the partition if a partition or filesystem
- is found on the device. Use with caution. Default
- is 'false'.
+ description: |-
+ Overwrite describes whether to skip checks and create the partition if a partition or filesystem is found on the device.
+ Use with caution. Default is 'false'.
type: boolean
tableType:
- description: 'TableType specifies the tupe of partition
- table. The following are supported: ''mbr'': default
- and setups a MS-DOS partition table ''gpt'': setups
- a GPT partition table'
+ description: |-
+ TableType specifies the type of partition table. The following are supported:
+ 'mbr': default and sets up an MS-DOS partition table
+ 'gpt': sets up a GPT partition table
type: string
required:
- device
@@ -1720,51 +1761,52 @@ spec:
are the configurations necessary for the init command
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this
- representation of an object. Servers should convert recognized
- schemas to the latest internal value, and may reject unrecognized
- values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
bootstrapTokens:
- description: BootstrapTokens is respected at `kubeadm init`
- time and describes a set of Bootstrap Tokens to create.
- This information IS NOT uploaded to the kubeadm cluster
- configmap, partly because of its sensitive nature
+ description: |-
+ BootstrapTokens is respected at `kubeadm init` time and describes a set of Bootstrap Tokens to create.
+ This information IS NOT uploaded to the kubeadm cluster configmap, partly because of its sensitive nature
items:
description: BootstrapToken describes one bootstrap token,
stored as a Secret in the cluster.
properties:
description:
- description: Description sets a human-friendly message
- why this token exists and what it's used for, so other
- administrators can know its purpose.
+ description: |-
+ Description sets a human-friendly message why this token exists and what it's used
+ for, so other administrators can know its purpose.
type: string
expires:
- description: Expires specifies the timestamp when this
- token expires. Defaults to being set dynamically at
- runtime based on the TTL. Expires and TTL are mutually
- exclusive.
+ description: |-
+ Expires specifies the timestamp when this token expires.
Defaults to being set + dynamically at runtime based on the TTL. Expires and TTL are mutually exclusive. format: date-time type: string groups: - description: Groups specifies the extra groups that - this token will authenticate as when/if used for authentication + description: |- + Groups specifies the extra groups that this token will authenticate as when/if + used for authentication items: type: string type: array token: - description: Token is used for establishing bidirectional - trust between nodes and control-planes. Used for joining - nodes in the cluster. + description: |- + Token is used for establishing bidirectional trust between nodes and control-planes. + Used for joining nodes in the cluster. type: string ttl: - description: TTL defines the time to live for this token. - Defaults to 24h. Expires and TTL are mutually exclusive. + description: |- + TTL defines the time to live for this token. Defaults to 24h. + Expires and TTL are mutually exclusive. type: string usages: - description: Usages describes the ways in which this - token can be used. Can by default be used for establishing - bidirectional trust, but that can be changed here. + description: |- + Usages describes the ways in which this token can be used. Can by default be used + for establishing bidirectional trust, but that can be changed here. items: type: string type: array @@ -1773,40 +1815,38 @@ spec: type: object type: array kind: - description: 'Kind is a string value representing the REST - resource this object represents. Servers may infer this - from the endpoint the client submits requests to. Cannot - be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string localAPIEndpoint: - description: LocalAPIEndpoint represents the endpoint of the - API server instance that's deployed on this control plane - node In HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint - in the sense that ControlPlaneEndpoint is the global endpoint - for the cluster, which then loadbalances the requests to - each individual API server. This configuration object lets - you customize what IP/DNS name and port the local API server - advertises it's accessible on. By default, kubeadm tries - to auto-detect the IP of the default interface and use that, - but in case that process fails you may set the desired value - here. + description: |- + LocalAPIEndpoint represents the endpoint of the API server instance that's deployed on this control plane node + In HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint in the sense that ControlPlaneEndpoint + is the global endpoint for the cluster, which then loadbalances the requests to each individual API server. This + configuration object lets you customize what IP/DNS name and port the local API server advertises it's accessible + on. By default, kubeadm tries to auto-detect the IP of the default interface and use that, but in case that process + fails you may set the desired value here. properties: advertiseAddress: description: AdvertiseAddress sets the IP address for the API server to advertise. 
type: string bindPort: - description: BindPort sets the secure port for the API - Server to bind to. Defaults to 6443. + description: |- + BindPort sets the secure port for the API Server to bind to. + Defaults to 6443. format: int32 type: integer type: object nodeRegistration: - description: NodeRegistration holds fields that relate to - registering the new control-plane node to the cluster. When - used in the context of control plane nodes, NodeRegistration - should remain consistent across both InitConfiguration and - JoinConfiguration + description: |- + NodeRegistration holds fields that relate to registering the new control-plane node to the cluster. + When used in the context of control plane nodes, NodeRegistration should remain consistent + across both InitConfiguration and JoinConfiguration properties: criSocket: description: CRISocket is used to retrieve container runtime @@ -1823,50 +1863,41 @@ spec: kubeletExtraArgs: additionalProperties: type: string - description: KubeletExtraArgs passes through extra arguments - to the kubelet. The arguments here are passed to the - kubelet command line via the environment file kubeadm - writes at runtime for the kubelet to source. This overrides - the generic base-level configuration in the kubelet-config-1.X - ConfigMap Flags have higher priority when parsing. These - values are local and specific to the node kubeadm is - executing on. + description: |- + KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file + kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap + Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. type: object name: - description: Name is the `.Metadata.Name` field of the - Node API object that will be created in this `kubeadm - init` or `kubeadm join` operation. This field is also - used in the CommonName field of the kubelet's client - certificate to the API server. Defaults to the hostname - of the node if not provided. + description: |- + Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. + This field is also used in the CommonName field of the kubelet's client certificate to the API server. + Defaults to the hostname of the node if not provided. type: string taints: - description: 'Taints specifies the taints the Node API - object should be registered with. If this field is unset, - i.e. nil, in the `kubeadm init` process it will be defaulted - to []v1.Taint{''node-role.kubernetes.io/master=""''}. - If you don''t want to taint your control-plane node, - set this field to an empty slice, i.e. `taints: {}` - in the YAML file. This field is solely used for Node - registration.' + description: |- + Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process + it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your control-plane node, set this field to an + empty slice, i.e. `taints: {}` in the YAML file. This field is solely used for Node registration. items: - description: The node this Taint is attached to has - the "effect" on any pod that does not tolerate the - Taint. 
+ description: |-
+ The node this Taint is attached to has the "effect" on
+ any pod that does not tolerate the Taint.
properties:
effect:
- description: Required. The effect of the taint on
- pods that do not tolerate the taint. Valid effects
- are NoSchedule, PreferNoSchedule and NoExecute.
+ description: |-
+ Required. The effect of the taint on pods
+ that do not tolerate the taint.
+ Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
type: string
key:
description: Required. The taint key to be applied
to a node.
type: string
timeAdded:
- description: TimeAdded represents the time at which
- the taint was added. It is only written for NoExecute
- taints.
+ description: |-
+ TimeAdded represents the time at which the taint was added.
+ It is only written for NoExecute taints.
format: date-time
type: string
value:
@@ -1885,21 +1916,23 @@ spec:
the join command
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this
- representation of an object. Servers should convert recognized
- schemas to the latest internal value, and may reject unrecognized
- values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
caCertPath:
- description: 'CACertPath is the path to the SSL certificate
- authority used to secure comunications between node and
- control-plane. Defaults to "/etc/kubernetes/pki/ca.crt".
- TODO: revisit when there is defaulting from k/k'
+ description: |-
+ CACertPath is the path to the SSL certificate authority used to
+ secure communications between node and control-plane.
+ Defaults to "/etc/kubernetes/pki/ca.crt".
+ TODO: revisit when there is defaulting from k/k
type: string
controlPlane:
- description: ControlPlane defines the additional control plane
- instance to be deployed on the joining node. If nil, no
- additional control plane instance will be deployed.
+ description: |-
+ ControlPlane defines the additional control plane instance to be deployed on the joining node.
+ If nil, no additional control plane instance will be deployed.
properties:
localAPIEndpoint:
description: LocalAPIEndpoint represents the endpoint
@@ -1910,58 +1943,57 @@ spec:
for the API server to advertise.
type: string
bindPort:
- description: BindPort sets the secure port for the
- API Server to bind to. Defaults to 6443.
+ description: |-
+ BindPort sets the secure port for the API Server to bind to.
+ Defaults to 6443.
format: int32 type: integer type: object type: object discovery: - description: 'Discovery specifies the options for the kubelet - to use during the TLS Bootstrap process TODO: revisit when - there is defaulting from k/k' + description: |- + Discovery specifies the options for the kubelet to use during the TLS Bootstrap process + TODO: revisit when there is defaulting from k/k properties: bootstrapToken: - description: BootstrapToken is used to set the options - for bootstrap token based discovery BootstrapToken and - File are mutually exclusive + description: |- + BootstrapToken is used to set the options for bootstrap token based discovery + BootstrapToken and File are mutually exclusive properties: apiServerEndpoint: description: APIServerEndpoint is an IP or domain name to the API server from which info will be fetched. type: string caCertHashes: - description: 'CACertHashes specifies a set of public - key pins to verify when token-based discovery is - used. The root CA found during discovery must match - one of these values. Specifying an empty set disables - root CA pinning, which can be unsafe. Each hash - is specified as ":", where the only - currently supported type is "sha256". This is a - hex-encoded SHA-256 hash of the Subject Public Key - Info (SPKI) object in DER-encoded ASN.1. These hashes - can be calculated using, for example, OpenSSL: openssl - x509 -pubkey -in ca.crt openssl rsa -pubin -outform - der 2>&/dev/null | openssl dgst -sha256 -hex' + description: |- + CACertHashes specifies a set of public key pins to verify + when token-based discovery is used. The root CA found during discovery + must match one of these values. Specifying an empty set disables root CA + pinning, which can be unsafe. Each hash is specified as ":", + where the only currently supported type is "sha256". This is a hex-encoded + SHA-256 hash of the Subject Public Key Info (SPKI) object in DER-encoded + ASN.1. These hashes can be calculated using, for example, OpenSSL: + openssl x509 -pubkey -in ca.crt openssl rsa -pubin -outform der 2>&/dev/null | openssl dgst -sha256 -hex items: type: string type: array token: - description: Token is a token used to validate cluster - information fetched from the control-plane. + description: |- + Token is a token used to validate cluster information + fetched from the control-plane. type: string unsafeSkipCAVerification: - description: UnsafeSkipCAVerification allows token-based - discovery without CA verification via CACertHashes. - This can weaken the security of kubeadm since other - nodes can impersonate the control-plane. + description: |- + UnsafeSkipCAVerification allows token-based discovery + without CA verification via CACertHashes. This can weaken + the security of kubeadm since other nodes can impersonate the control-plane. type: boolean required: - token type: object file: - description: File is used to specify a file or URL to - a kubeconfig file from which to load cluster information + description: |- + File is used to specify a file or URL to a kubeconfig file from which to load cluster information BootstrapToken and File are mutually exclusive properties: kubeConfigPath: @@ -1976,26 +2008,25 @@ spec: description: Timeout modifies the discovery timeout type: string tlsBootstrapToken: - description: TLSBootstrapToken is a token used for TLS - bootstrapping. If .BootstrapToken is set, this field - is defaulted to .BootstrapToken.Token, but can be overridden. 
- If .File is set, this field **must be set** in case - the KubeConfigFile does not contain any other authentication - information + description: |- + TLSBootstrapToken is a token used for TLS bootstrapping. + If .BootstrapToken is set, this field is defaulted to .BootstrapToken.Token, but can be overridden. + If .File is set, this field **must be set** in case the KubeConfigFile does not contain any other authentication information type: string type: object kind: - description: 'Kind is a string value representing the REST - resource this object represents. Servers may infer this - from the endpoint the client submits requests to. Cannot - be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string nodeRegistration: - description: NodeRegistration holds fields that relate to - registering the new control-plane node to the cluster. When - used in the context of control plane nodes, NodeRegistration - should remain consistent across both InitConfiguration and - JoinConfiguration + description: |- + NodeRegistration holds fields that relate to registering the new control-plane node to the cluster. + When used in the context of control plane nodes, NodeRegistration should remain consistent + across both InitConfiguration and JoinConfiguration properties: criSocket: description: CRISocket is used to retrieve container runtime @@ -2012,50 +2043,41 @@ spec: kubeletExtraArgs: additionalProperties: type: string - description: KubeletExtraArgs passes through extra arguments - to the kubelet. The arguments here are passed to the - kubelet command line via the environment file kubeadm - writes at runtime for the kubelet to source. This overrides - the generic base-level configuration in the kubelet-config-1.X - ConfigMap Flags have higher priority when parsing. These - values are local and specific to the node kubeadm is - executing on. + description: |- + KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file + kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap + Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. type: object name: - description: Name is the `.Metadata.Name` field of the - Node API object that will be created in this `kubeadm - init` or `kubeadm join` operation. This field is also - used in the CommonName field of the kubelet's client - certificate to the API server. Defaults to the hostname - of the node if not provided. + description: |- + Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. + This field is also used in the CommonName field of the kubelet's client certificate to the API server. + Defaults to the hostname of the node if not provided. type: string taints: - description: 'Taints specifies the taints the Node API - object should be registered with. If this field is unset, - i.e. 
nil, in the `kubeadm init` process it will be defaulted - to []v1.Taint{''node-role.kubernetes.io/master=""''}. - If you don''t want to taint your control-plane node, - set this field to an empty slice, i.e. `taints: {}` - in the YAML file. This field is solely used for Node - registration.' + description: |- + Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process + it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your control-plane node, set this field to an + empty slice, i.e. `taints: {}` in the YAML file. This field is solely used for Node registration. items: - description: The node this Taint is attached to has - the "effect" on any pod that does not tolerate the - Taint. + description: |- + The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. properties: effect: - description: Required. The effect of the taint on - pods that do not tolerate the taint. Valid effects - are NoSchedule, PreferNoSchedule and NoExecute. + description: |- + Required. The effect of the taint on pods + that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. type: string key: description: Required. The taint key to be applied to a node. type: string timeAdded: - description: TimeAdded represents the time at which - the taint was added. It is only written for NoExecute - taints. + description: |- + TimeAdded represents the time at which the taint was added. + It is only written for NoExecute taints. format: date-time type: string value: @@ -2103,13 +2125,20 @@ spec: type: string type: array useExperimentalRetryJoin: - description: "UseExperimentalRetryJoin replaces a basic kubeadm - command with a shell script with retries for joins. \n This - is meant to be an experimental temporary workaround on some - environments where joins fail due to timing (and other issues). - The long term goal is to add retries to kubeadm proper and use - that functionality. \n This will add about 40KB to userdata - \n For more information, refer to https://github.com/kubernetes-sigs/cluster-api/pull/2763#discussion_r397306055." + description: |- + UseExperimentalRetryJoin replaces a basic kubeadm command with a shell + script with retries for joins. + + + This is meant to be an experimental temporary workaround on some environments + where joins fail due to timing (and other issues). The long term goal is to add retries to + kubeadm proper and use that functionality. + + + This will add about 40KB to userdata + + + For more information, refer to https://github.com/kubernetes-sigs/cluster-api/pull/2763#discussion_r397306055. type: boolean users: description: Users specifies extra users to add @@ -2164,94 +2193,108 @@ spec: type: object type: array verbosity: - description: Verbosity is the number for the kubeadm log level - verbosity. It overrides the `--v` flag in kubeadm commands. + description: |- + Verbosity is the number for the kubeadm log level verbosity. + It overrides the `--v` flag in kubeadm commands. format: int32 type: integer type: object machineTemplate: - description: MachineTemplate contains information about how machines + description: |- + MachineTemplate contains information about how machines should be shaped when creating or updating a control plane. 
properties: infrastructureRef: - description: InfrastructureRef is a required reference to a custom - resource offered by an infrastructure provider. + description: |- + InfrastructureRef is a required reference to a custom resource + offered by an infrastructure provider. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead - of an entire object, this string should contain a valid - JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part - of an object. TODO: this design is not final and this field - is subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic metadata: - description: 'Standard object''s metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key value map - stored with a resource that may be set by external tools - to store and retrieve arbitrary metadata. They are not queryable - and should be preserved when modifying objects. More info: - http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations type: object labels: additionalProperties: type: string - description: 'Map of string keys and values that can be used - to organize and categorize (scope and select) objects. May - match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object type: object nodeDrainTimeout: - description: 'NodeDrainTimeout is the total amount of time that - the controller will spend on draining a controlplane node The - default value is 0, meaning that the node can be drained without - any time limitations. NOTE: NodeDrainTimeout is different from - `kubectl drain --timeout`' + description: |- + NodeDrainTimeout is the total amount of time that the controller will spend on draining a controlplane node + The default value is 0, meaning that the node can be drained without any time limitations. + NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` type: string required: - infrastructureRef type: object replicas: - description: Number of desired machines. Defaults to 1. When stacked - etcd is used only odd numbers are permitted, as per [etcd best practice](https://etcd.io/docs/v3.3.12/faq/#why-an-odd-number-of-cluster-members). + description: |- + Number of desired machines. Defaults to 1. When stacked etcd is used only + odd numbers are permitted, as per [etcd best practice](https://etcd.io/docs/v3.3.12/faq/#why-an-odd-number-of-cluster-members). This is a pointer to distinguish between explicit zero and not specified. format: int32 type: integer rolloutAfter: - description: RolloutAfter is a field to indicate a rollout should - be performed after the specified time even if no changes have been - made to the KubeadmControlPlane. + description: |- + RolloutAfter is a field to indicate a rollout should be performed + after the specified time even if no changes have been made to the + KubeadmControlPlane. format: date-time type: string rolloutStrategy: @@ -2259,27 +2302,33 @@ spec: rollingUpdate: maxSurge: 1 type: RollingUpdate - description: The RolloutStrategy to use to replace control plane machines - with new ones. + description: |- + The RolloutStrategy to use to replace control plane machines with + new ones. properties: rollingUpdate: - description: Rolling update config params. Present only if RolloutStrategyType - = RollingUpdate. + description: |- + Rolling update config params. 
Present only if + RolloutStrategyType = RollingUpdate. properties: maxSurge: anyOf: - type: integer - type: string - description: 'The maximum number of control planes that can - be scheduled above or under the desired number of control - planes. Value can be an absolute number 1 or 0. Defaults - to 1. Example: when this is set to 1, the control plane - can be scaled up immediately when the rolling update starts.' + description: |- + The maximum number of control planes that can be scheduled above or under the + desired number of control planes. + Value can be an absolute number 1 or 0. + Defaults to 1. + Example: when this is set to 1, the control plane can be scaled + up immediately when the rolling update starts. x-kubernetes-int-or-string: true type: object type: - description: Type of rollout. Currently the only supported strategy - is "RollingUpdate". Default is RollingUpdate. + description: |- + Type of rollout. Currently the only supported strategy is + "RollingUpdate". + Default is RollingUpdate. type: string type: object version: @@ -2300,37 +2349,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. 
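A minimal sketch of how replicas, rolloutAfter, and rolloutStrategy from the schema above combine on a KubeadmControlPlane spec; all values are illustrative only:

    spec:
      replicas: 3  # stacked etcd permits only odd counts
      rolloutAfter: "2024-01-01T00:00:00Z"  # hypothetical: force a rollout even without spec changes
      rolloutStrategy:
        type: RollingUpdate  # currently the only supported strategy
        rollingUpdate:
          maxSurge: 1  # create one new machine before an old one is removed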
type: string required: - status @@ -2338,17 +2387,20 @@ spec: type: object type: array failureMessage: - description: ErrorMessage indicates that there is a terminal problem - reconciling the state, and will be set to a descriptive error message. + description: |- + ErrorMessage indicates that there is a terminal problem reconciling the + state, and will be set to a descriptive error message. type: string failureReason: - description: FailureReason indicates that there is a terminal problem - reconciling the state, and will be set to a token value suitable - for programmatic interpretation. + description: |- + FailureReason indicates that there is a terminal problem reconciling the + state, and will be set to a token value suitable for + programmatic interpretation. type: string initialized: - description: Initialized denotes whether or not the control plane - has the uploaded kubeadm-config configmap. + description: |- + Initialized denotes whether or not the control plane has the + uploaded kubeadm-config configmap. type: boolean observedGeneration: description: ObservedGeneration is the latest generation observed @@ -2356,8 +2408,9 @@ spec: format: int64 type: integer ready: - description: Ready denotes that the KubeadmControlPlane API Server - is ready to receive requests. + description: |- + Ready denotes that the KubeadmControlPlane API Server is ready to + receive requests. type: boolean readyReplicas: description: Total number of fully running and ready control plane @@ -2365,37 +2418,42 @@ spec: format: int32 type: integer replicas: - description: Total number of non-terminated machines targeted by this - control plane (their labels match the selector). + description: |- + Total number of non-terminated machines targeted by this control plane + (their labels match the selector). format: int32 type: integer selector: - description: 'Selector is the label selector in string format to avoid - introspection by clients, and is used to provide the CRD-based integration - for the scale subresource and additional integrations for things - like kubectl describe.. The string will be in the same format as - the query-param syntax. More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors' + description: |- + Selector is the label selector in string format to avoid introspection + by clients, and is used to provide the CRD-based integration for the + scale subresource and additional integrations for things like kubectl + describe. The string will be in the same format as the query-param syntax. + More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors type: string unavailableReplicas: - description: Total number of unavailable machines targeted by this - control plane. This is the total number of machines that are still - required for the deployment to have 100% available capacity. They - may either be machines that are running but not yet ready or machines + description: |- + Total number of unavailable machines targeted by this control plane. + This is the total number of machines that are still required for + the deployment to have 100% available capacity. They may either + be machines that are running but not yet ready or machines that still have not been created. format: int32 type: integer updatedReplicas: - description: Total number of non-terminated machines targeted by this - control plane that have the desired template spec.
+ description: |- + Total number of non-terminated machines targeted by this control plane + that have the desired template spec. format: int32 type: integer version: - description: Version represents the minimum Kubernetes version for - the control plane machines in the cluster. + description: |- + Version represents the minimum Kubernetes version for the control plane machines + in the cluster. type: string type: object type: object - served: true + served: false storage: false subresources: scale: @@ -2455,14 +2513,19 @@ spec: API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -2470,8 +2533,9 @@ spec: description: KubeadmControlPlaneSpec defines the desired state of KubeadmControlPlane. properties: kubeadmConfigSpec: - description: KubeadmConfigSpec is a KubeadmConfigSpec to use for initializing - and joining machines to the control plane. + description: |- + KubeadmConfigSpec is a KubeadmConfigSpec + to use for initializing and joining machines to the control plane. properties: clusterConfiguration: description: ClusterConfiguration along with InitConfiguration @@ -2490,21 +2554,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags to pass - to the control plane component. TODO: This is temporary - and ideally we would like to switch all components to - use ComponentConfig + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements describing - volumes that are mounted from the host. + description: |- + HostPathMount contains elements describing volumes that are mounted from the + host. properties: hostPath: - description: HostPath is the path in the host that - will be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. 
type: string mountPath: description: MountPath is the path inside the pod @@ -2532,34 +2598,34 @@ spec: type: string type: object apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string certificatesDir: - description: 'CertificatesDir specifies where to store or - look for all required certificates. NB: if not provided, - this will default to `/etc/kubernetes/pki`' + description: |- + CertificatesDir specifies where to store or look for all required certificates. + NB: if not provided, this will default to `/etc/kubernetes/pki` type: string clusterName: description: The cluster name type: string controlPlaneEndpoint: - description: 'ControlPlaneEndpoint sets a stable IP address - or DNS name for the control plane; it can be a valid IP - address or a RFC-1123 DNS subdomain, both with optional - TCP port. In case the ControlPlaneEndpoint is not specified, - the AdvertiseAddress + BindPort are used; in case the ControlPlaneEndpoint - is specified but without a TCP port, the BindPort is used. - Possible usages are: e.g. In a cluster with more than one - control plane instances, this field should be assigned the - address of the external load balancer in front of the control - plane instances. e.g. in environments with enforced node - recycling, the ControlPlaneEndpoint could be used for assigning - a stable DNS to the control plane. NB: This value defaults - to the first value in the Cluster object status.apiEndpoints - array.' + description: |- + ControlPlaneEndpoint sets a stable IP address or DNS name for the control plane; it + can be a valid IP address or a RFC-1123 DNS subdomain, both with optional TCP port. + In case the ControlPlaneEndpoint is not specified, the AdvertiseAddress + BindPort + are used; in case the ControlPlaneEndpoint is specified but without a TCP port, + the BindPort is used. + Possible usages are: + e.g. In a cluster with more than one control plane instances, this field should be + assigned the address of the external load balancer in front of the + control plane instances. + e.g. in environments with enforced node recycling, the ControlPlaneEndpoint + could be used for assigning a stable DNS to the control plane. + NB: This value defaults to the first value in the Cluster object status.apiEndpoints array. type: string controllerManager: description: ControllerManager contains extra settings for @@ -2568,21 +2634,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags to pass - to the control plane component. TODO: This is temporary - and ideally we would like to switch all components to - use ComponentConfig + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. 
type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements describing - volumes that are mounted from the host. + description: |- + HostPathMount contains elements describing volumes that are mounted from the + host. properties: hostPath: - description: HostPath is the path in the host that - will be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside the pod @@ -2610,34 +2678,35 @@ spec: in the cluster. properties: imageRepository: - description: ImageRepository sets the container registry - to pull images from. if not set, the ImageRepository - defined in ClusterConfiguration will be used instead. + description: |- + ImageRepository sets the container registry to pull images from. + if not set, the ImageRepository defined in ClusterConfiguration will be used instead. type: string imageTag: - description: ImageTag allows to specify a tag for the - image. In case this value is set, kubeadm does not change - automatically the version of the above components during - upgrades. + description: |- + ImageTag allows to specify a tag for the image. + In case this value is set, kubeadm does not change automatically the version of the above components during upgrades. type: string type: object etcd: - description: 'Etcd holds configuration for etcd. NB: This - value defaults to a Local (stacked) etcd' + description: |- + Etcd holds configuration for etcd. + NB: This value defaults to a Local (stacked) etcd properties: external: - description: External describes how to connect to an external - etcd cluster Local and External are mutually exclusive + description: |- + External describes how to connect to an external etcd cluster + Local and External are mutually exclusive properties: caFile: - description: CAFile is an SSL Certificate Authority - file used to secure etcd communication. Required - if using a TLS connection. + description: |- + CAFile is an SSL Certificate Authority file used to secure etcd communication. + Required if using a TLS connection. type: string certFile: - description: CertFile is an SSL certification file - used to secure etcd communication. Required if using - a TLS connection. + description: |- + CertFile is an SSL certification file used to secure etcd communication. + Required if using a TLS connection. type: string endpoints: description: Endpoints of etcd members. Required for @@ -2646,8 +2715,9 @@ spec: type: string type: array keyFile: - description: KeyFile is an SSL key file used to secure - etcd communication. Required if using a TLS connection. + description: |- + KeyFile is an SSL key file used to secure etcd communication. + Required if using a TLS connection. type: string required: - caFile @@ -2656,30 +2726,31 @@ spec: - keyFile type: object local: - description: Local provides configuration knobs for configuring - the local etcd instance Local and External are mutually - exclusive + description: |- + Local provides configuration knobs for configuring the local etcd instance + Local and External are mutually exclusive properties: dataDir: - description: DataDir is the directory etcd will place - its data. Defaults to "/var/lib/etcd". + description: |- + DataDir is the directory etcd will place its data. + Defaults to "/var/lib/etcd". 
type: string extraArgs: additionalProperties: type: string - description: ExtraArgs are extra arguments provided - to the etcd binary when run inside a static pod. + description: |- + ExtraArgs are extra arguments provided to the etcd binary + when run inside a static pod. type: object imageRepository: - description: ImageRepository sets the container registry - to pull images from. if not set, the ImageRepository - defined in ClusterConfiguration will be used instead. + description: |- + ImageRepository sets the container registry to pull images from. + if not set, the ImageRepository defined in ClusterConfiguration will be used instead. type: string imageTag: - description: ImageTag allows to specify a tag for - the image. In case this value is set, kubeadm does - not change automatically the version of the above - components during upgrades. + description: |- + ImageTag allows to specify a tag for the image. + In case this value is set, kubeadm does not change automatically the version of the above components during upgrades. type: string peerCertSANs: description: PeerCertSANs sets extra Subject Alternative @@ -2701,53 +2772,52 @@ spec: description: FeatureGates enabled by the user. type: object imageRepository: - description: 'ImageRepository sets the container registry - to pull images from. * If not set, the default registry - of kubeadm will be used, i.e. * registry.k8s.io (new registry): - >= v1.22.17, >= v1.23.15, >= v1.24.9, >= v1.25.0 * k8s.gcr.io - (old registry): all older versions Please note that when - imageRepository is not set we don''t allow upgrades to versions - >= v1.22.0 which use the old registry (k8s.gcr.io). Please - use a newer patch version with the new registry instead - (i.e. >= v1.22.17, >= v1.23.15, >= v1.24.9, >= v1.25.0). - * If the version is a CI build (kubernetes version starts - with `ci/` or `ci-cross/`) `gcr.io/k8s-staging-ci-images` - will be used as a default for control plane components and - for kube-proxy, while `registry.k8s.io` will be used for - all the other images.' + description: |- + ImageRepository sets the container registry to pull images from. + * If not set, the default registry of kubeadm will be used, i.e. + * registry.k8s.io (new registry): >= v1.22.17, >= v1.23.15, >= v1.24.9, >= v1.25.0 + * k8s.gcr.io (old registry): all older versions + Please note that when imageRepository is not set we don't allow upgrades to + versions >= v1.22.0 which use the old registry (k8s.gcr.io). Please use + a newer patch version with the new registry instead (i.e. >= v1.22.17, + >= v1.23.15, >= v1.24.9, >= v1.25.0). + * If the version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`) + `gcr.io/k8s-staging-ci-images` will be used as a default for control plane components + and for kube-proxy, while `registry.k8s.io` will be used for all the other images. type: string kind: - description: 'Kind is a string value representing the REST - resource this object represents. Servers may infer this - from the endpoint the client submits requests to. Cannot - be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string kubernetesVersion: - description: 'KubernetesVersion is the target version of the - control plane. NB: This value defaults to the Machine object - spec.version' + description: |- + KubernetesVersion is the target version of the control plane. + NB: This value defaults to the Machine object spec.version type: string networking: - description: 'Networking holds configuration for the networking - topology of the cluster. NB: This value defaults to the - Cluster object spec.clusterNetwork.' + description: |- + Networking holds configuration for the networking topology of the cluster. + NB: This value defaults to the Cluster object spec.clusterNetwork. properties: dnsDomain: description: DNSDomain is the dns domain used by k8s services. Defaults to "cluster.local". type: string podSubnet: - description: PodSubnet is the subnet used by pods. If - unset, the API server will not allocate CIDR ranges - for every node. Defaults to a comma-delimited string - of the Cluster object's spec.clusterNetwork.services.cidrBlocks - if that is set + description: |- + PodSubnet is the subnet used by pods. + If unset, the API server will not allocate CIDR ranges for every node. + Defaults to a comma-delimited string of the Cluster object's spec.clusterNetwork.pods.cidrBlocks if that is set type: string serviceSubnet: - description: ServiceSubnet is the subnet used by k8s services. - Defaults to a comma-delimited string of the Cluster - object's spec.clusterNetwork.pods.cidrBlocks, or to - "10.96.0.0/12" if that's unset. + description: |- + ServiceSubnet is the subnet used by k8s services. + Defaults to a comma-delimited string of the Cluster object's spec.clusterNetwork.services.cidrBlocks, or + to "10.96.0.0/12" if that's unset. type: string type: object scheduler: @@ -2757,21 +2827,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags to pass - to the control plane component. TODO: This is temporary - and ideally we would like to switch all components to - use ComponentConfig + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements describing - volumes that are mounted from the host. + description: |- + HostPathMount contains elements describing volumes that are mounted from the + host. properties: hostPath: - description: HostPath is the path in the host that - will be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside the pod @@ -2822,9 +2894,9 @@ spec: be used. If set to None, no label is used. type: string overwrite: - description: Overwrite defines whether or not to overwrite - any existing filesystem. If true, any pre-existing - file system will be destroyed. Use with Caution. + description: |- + Overwrite defines whether or not to overwrite any existing filesystem. + If true, any pre-existing file system will be destroyed. Use with Caution. type: boolean partition: description: 'Partition specifies the partition to use. @@ -2833,11 +2905,9 @@ number.'
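A minimal sketch of a diskSetup filesystem entry using the label, overwrite, and partition fields above; the device and filesystem fields are assumed from the full Filesystem schema, and all values are placeholders (overwrite is left false so a pre-existing filesystem is never destroyed):

    spec:
      kubeadmConfigSpec:
        diskSetup:
          filesystems:
          - device: /dev/sdb  # placeholder data disk
            filesystem: ext4
            label: etcd_disk  # placeholder label
            overwrite: false
            partition: auto  # "auto|any", "auto", "any", or a partition number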
type: string replaceFS: - description: 'ReplaceFS is a special directive, used - for Microsoft Azure that instructs cloud-init to replace - a file system of . NOTE: unless you define - a label, this requires the use of the ''any'' partition - directive.' + description: |- + ReplaceFS is a special directive, used for Microsoft Azure that instructs cloud-init to replace a file system of <FS_TYPE>. + NOTE: unless you define a label, this requires the use of the 'any' partition directive. type: string required: - device @@ -2856,22 +2926,21 @@ spec: description: Device is the name of the device. type: string layout: - description: Layout specifies the device layout. If - it is true, a single partition will be created for - the entire device. When layout is false, it means - don't partition or ignore existing partitioning. + description: |- + Layout specifies the device layout. + If it is true, a single partition will be created for the entire device. + When layout is false, it means don't partition or ignore existing partitioning. type: boolean overwrite: - description: Overwrite describes whether to skip checks - and create the partition if a partition or filesystem - is found on the device. Use with caution. Default - is 'false'. + description: |- + Overwrite describes whether to skip checks and create the partition if a partition or filesystem is found on the device. + Use with caution. Default is 'false'. type: boolean tableType: - description: 'TableType specifies the tupe of partition - table. The following are supported: ''mbr'': default - and setups a MS-DOS partition table ''gpt'': setups - a GPT partition table' + description: |- + TableType specifies the type of partition table. The following are supported: + 'mbr': default and sets up an MS-DOS partition table + 'gpt': sets up a GPT partition table type: string required: - device @@ -2954,10 +3023,12 @@ spec: description: ContainerLinuxConfig contains CLC specific configuration. properties: additionalConfig: - description: "AdditionalConfig contains additional configuration - to be merged with the Ignition configuration generated - by the bootstrapper controller. More info: https://coreos.github.io/ignition/operator-notes/#config-merging - \n The data format is documented here: https://kinvolk.io/docs/flatcar-container-linux/latest/provisioning/cl-config/" + description: |- + AdditionalConfig contains additional configuration to be merged with the Ignition + configuration generated by the bootstrapper controller. More info: https://coreos.github.io/ignition/operator-notes/#config-merging + + + The data format is documented here: https://kinvolk.io/docs/flatcar-container-linux/latest/provisioning/cl-config/ type: string strict: description: Strict controls if AdditionalConfig should @@ -2970,51 +3041,52 @@ spec: are the configurations necessary for the init command properties: apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string bootstrapTokens: - description: BootstrapTokens is respected at `kubeadm init` - time and describes a set of Bootstrap Tokens to create. - This information IS NOT uploaded to the kubeadm cluster - configmap, partly because of its sensitive nature + description: |- + BootstrapTokens is respected at `kubeadm init` time and describes a set of Bootstrap Tokens to create. + This information IS NOT uploaded to the kubeadm cluster configmap, partly because of its sensitive nature items: description: BootstrapToken describes one bootstrap token, stored as a Secret in the cluster. properties: description: - description: Description sets a human-friendly message - why this token exists and what it's used for, so other - administrators can know its purpose. + description: |- + Description sets a human-friendly message why this token exists and what it's used + for, so other administrators can know its purpose. type: string expires: - description: Expires specifies the timestamp when this - token expires. Defaults to being set dynamically at - runtime based on the TTL. Expires and TTL are mutually - exclusive. + description: |- + Expires specifies the timestamp when this token expires. Defaults to being set + dynamically at runtime based on the TTL. Expires and TTL are mutually exclusive. format: date-time type: string groups: - description: Groups specifies the extra groups that - this token will authenticate as when/if used for authentication + description: |- + Groups specifies the extra groups that this token will authenticate as when/if + used for authentication items: type: string type: array token: - description: Token is used for establishing bidirectional - trust between nodes and control-planes. Used for joining - nodes in the cluster. + description: |- + Token is used for establishing bidirectional trust between nodes and control-planes. + Used for joining nodes in the cluster. type: string ttl: - description: TTL defines the time to live for this token. - Defaults to 24h. Expires and TTL are mutually exclusive. + description: |- + TTL defines the time to live for this token. Defaults to 24h. + Expires and TTL are mutually exclusive. type: string usages: - description: Usages describes the ways in which this - token can be used. Can by default be used for establishing - bidirectional trust, but that can be changed here. + description: |- + Usages describes the ways in which this token can be used. Can by default be used + for establishing bidirectional trust, but that can be changed here. items: type: string type: array @@ -3023,40 +3095,38 @@ spec: type: object type: array kind: - description: 'Kind is a string value representing the REST - resource this object represents. Servers may infer this - from the endpoint the client submits requests to. Cannot - be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string localAPIEndpoint: - description: LocalAPIEndpoint represents the endpoint of the - API server instance that's deployed on this control plane - node In HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint - in the sense that ControlPlaneEndpoint is the global endpoint - for the cluster, which then loadbalances the requests to - each individual API server. This configuration object lets - you customize what IP/DNS name and port the local API server - advertises it's accessible on. By default, kubeadm tries - to auto-detect the IP of the default interface and use that, - but in case that process fails you may set the desired value - here. + description: |- + LocalAPIEndpoint represents the endpoint of the API server instance that's deployed on this control plane node + In HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint in the sense that ControlPlaneEndpoint + is the global endpoint for the cluster, which then loadbalances the requests to each individual API server. This + configuration object lets you customize what IP/DNS name and port the local API server advertises it's accessible + on. By default, kubeadm tries to auto-detect the IP of the default interface and use that, but in case that process + fails you may set the desired value here. properties: advertiseAddress: description: AdvertiseAddress sets the IP address for the API server to advertise. type: string bindPort: - description: BindPort sets the secure port for the API - Server to bind to. Defaults to 6443. + description: |- + BindPort sets the secure port for the API Server to bind to. + Defaults to 6443. format: int32 type: integer type: object nodeRegistration: - description: NodeRegistration holds fields that relate to - registering the new control-plane node to the cluster. When - used in the context of control plane nodes, NodeRegistration - should remain consistent across both InitConfiguration and - JoinConfiguration + description: |- + NodeRegistration holds fields that relate to registering the new control-plane node to the cluster. + When used in the context of control plane nodes, NodeRegistration should remain consistent + across both InitConfiguration and JoinConfiguration properties: criSocket: description: CRISocket is used to retrieve container runtime @@ -3071,12 +3141,12 @@ spec: type: string type: array imagePullPolicy: - description: ImagePullPolicy specifies the policy for - image pulling during kubeadm "init" and "join" operations. - The value of this field must be one of "Always", "IfNotPresent" - or "Never". Defaults to "IfNotPresent". This can be - used only with Kubernetes version equal to 1.22 and - later. + description: |- + ImagePullPolicy specifies the policy for image pulling + during kubeadm "init" and "join" operations. The value of + this field must be one of "Always", "IfNotPresent" or + "Never". Defaults to "IfNotPresent". This can be used only + with Kubernetes version equal to 1.22 and later. enum: - Always - IfNotPresent @@ -3085,50 +3155,41 @@ spec: kubeletExtraArgs: additionalProperties: type: string - description: KubeletExtraArgs passes through extra arguments - to the kubelet. The arguments here are passed to the - kubelet command line via the environment file kubeadm - writes at runtime for the kubelet to source. 
This overrides - the generic base-level configuration in the kubelet-config-1.X - ConfigMap Flags have higher priority when parsing. These - values are local and specific to the node kubeadm is - executing on. + description: |- + KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file + kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap + Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. type: object name: - description: Name is the `.Metadata.Name` field of the - Node API object that will be created in this `kubeadm - init` or `kubeadm join` operation. This field is also - used in the CommonName field of the kubelet's client - certificate to the API server. Defaults to the hostname - of the node if not provided. + description: |- + Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. + This field is also used in the CommonName field of the kubelet's client certificate to the API server. + Defaults to the hostname of the node if not provided. type: string taints: - description: 'Taints specifies the taints the Node API - object should be registered with. If this field is unset, - i.e. nil, in the `kubeadm init` process it will be defaulted - to []v1.Taint{''node-role.kubernetes.io/master=""''}. - If you don''t want to taint your control-plane node, - set this field to an empty slice, i.e. `taints: []` - in the YAML file. This field is solely used for Node - registration.' + description: |- + Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process + it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your control-plane node, set this field to an + empty slice, i.e. `taints: []` in the YAML file. This field is solely used for Node registration. items: - description: The node this Taint is attached to has - the "effect" on any pod that does not tolerate the - Taint. + description: |- + The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. properties: effect: - description: Required. The effect of the taint on - pods that do not tolerate the taint. Valid effects - are NoSchedule, PreferNoSchedule and NoExecute. + description: |- + Required. The effect of the taint on pods + that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. type: string key: description: Required. The taint key to be applied to a node. type: string timeAdded: - description: TimeAdded represents the time at which - the taint was added. It is only written for NoExecute - taints. + description: |- + TimeAdded represents the time at which the taint was added. + It is only written for NoExecute taints. format: date-time type: string value: @@ -3142,33 +3203,29 @@ spec: type: array type: object patches: - description: Patches contains options related to applying - patches to components deployed by kubeadm during "kubeadm - init". The minimum kubernetes version needed to support - Patches is v1.22 + description: |- + Patches contains options related to applying patches to components deployed by kubeadm during + "kubeadm init". 
The minimum kubernetes version needed to support Patches is v1.22 properties: directory: - description: Directory is a path to a directory that contains - files named "target[suffix][+patchtype].extension". - For example, "kube-apiserver0+merge.yaml" or just "etcd.json". - "target" can be one of "kube-apiserver", "kube-controller-manager", - "kube-scheduler", "etcd". "patchtype" can be one of - "strategic" "merge" or "json" and they match the patch - formats supported by kubectl. The default "patchtype" - is "strategic". "extension" must be either "json" or - "yaml". "suffix" is an optional string that can be used - to determine which patches are applied first alpha-numerically. - These files can be written into the target directory - via KubeadmConfig.Files which specifies additional files - to be created on the machine, either with content inline - or by referencing a secret. + description: |- + Directory is a path to a directory that contains files named "target[suffix][+patchtype].extension". + For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "target" can be one of + "kube-apiserver", "kube-controller-manager", "kube-scheduler", "etcd". "patchtype" can be one + of "strategic" "merge" or "json" and they match the patch formats supported by kubectl. + The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". + "suffix" is an optional string that can be used to determine which patches are applied + first alpha-numerically. + These files can be written into the target directory via KubeadmConfig.Files which + specifies additional files to be created on the machine, either with content inline or + by referencing a secret. type: string type: object skipPhases: - description: SkipPhases is a list of phases to skip during - command execution. The list of phases can be obtained with - the "kubeadm init --help" command. This option takes effect - only on Kubernetes >=1.22.0. + description: |- + SkipPhases is a list of phases to skip during command execution. + The list of phases can be obtained with the "kubeadm init --help" command. + This option takes effect only on Kubernetes >=1.22.0. items: type: string type: array @@ -3178,21 +3235,23 @@ spec: the join command properties: apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string caCertPath: - description: 'CACertPath is the path to the SSL certificate - authority used to secure comunications between node and - control-plane. Defaults to "/etc/kubernetes/pki/ca.crt". - TODO: revisit when there is defaulting from k/k' + description: |- + CACertPath is the path to the SSL certificate authority used to + secure communications between node and control-plane. + Defaults to "/etc/kubernetes/pki/ca.crt". + TODO: revisit when there is defaulting from k/k type: string controlPlane: - description: ControlPlane defines the additional control plane - instance to be deployed on the joining node.
If nil, no - additional control plane instance will be deployed. + description: |- + ControlPlane defines the additional control plane instance to be deployed on the joining node. + If nil, no additional control plane instance will be deployed. properties: localAPIEndpoint: description: LocalAPIEndpoint represents the endpoint @@ -3203,58 +3262,57 @@ spec: for the API server to advertise. type: string bindPort: - description: BindPort sets the secure port for the - API Server to bind to. Defaults to 6443. + description: |- + BindPort sets the secure port for the API Server to bind to. + Defaults to 6443. format: int32 type: integer type: object type: object discovery: - description: 'Discovery specifies the options for the kubelet - to use during the TLS Bootstrap process TODO: revisit when - there is defaulting from k/k' + description: |- + Discovery specifies the options for the kubelet to use during the TLS Bootstrap process + TODO: revisit when there is defaulting from k/k properties: bootstrapToken: - description: BootstrapToken is used to set the options - for bootstrap token based discovery BootstrapToken and - File are mutually exclusive + description: |- + BootstrapToken is used to set the options for bootstrap token based discovery + BootstrapToken and File are mutually exclusive properties: apiServerEndpoint: description: APIServerEndpoint is an IP or domain name to the API server from which info will be fetched. type: string caCertHashes: - description: 'CACertHashes specifies a set of public - key pins to verify when token-based discovery is - used. The root CA found during discovery must match - one of these values. Specifying an empty set disables - root CA pinning, which can be unsafe. Each hash - is specified as ":", where the only - currently supported type is "sha256". This is a - hex-encoded SHA-256 hash of the Subject Public Key - Info (SPKI) object in DER-encoded ASN.1. These hashes - can be calculated using, for example, OpenSSL: openssl - x509 -pubkey -in ca.crt openssl rsa -pubin -outform - der 2>&/dev/null | openssl dgst -sha256 -hex' + description: |- + CACertHashes specifies a set of public key pins to verify + when token-based discovery is used. The root CA found during discovery + must match one of these values. Specifying an empty set disables root CA + pinning, which can be unsafe. Each hash is specified as "<type>:<value>", + where the only currently supported type is "sha256". This is a hex-encoded + SHA-256 hash of the Subject Public Key Info (SPKI) object in DER-encoded + ASN.1. These hashes can be calculated using, for example, OpenSSL: + openssl x509 -pubkey -in ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex items: type: string type: array token: - description: Token is a token used to validate cluster - information fetched from the control-plane. + description: |- + Token is a token used to validate cluster information + fetched from the control-plane. type: string unsafeSkipCAVerification: - description: UnsafeSkipCAVerification allows token-based - discovery without CA verification via CACertHashes. - This can weaken the security of kubeadm since other - nodes can impersonate the control-plane. + description: |- + UnsafeSkipCAVerification allows token-based discovery + without CA verification via CACertHashes. This can weaken + the security of kubeadm since other nodes can impersonate the control-plane.
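A sketch of token-based discovery with CA pinning, per the fields above; the endpoint, token, and hash are placeholders, and the pin is the SHA-256 SPKI digest of the cluster CA, computable with the OpenSSL pipeline shown in the description:

    spec:
      kubeadmConfigSpec:
        joinConfiguration:
          discovery:
            bootstrapToken:
              apiServerEndpoint: "10.0.0.1:6443"  # placeholder
              token: abcdef.0123456789abcdef  # placeholder bootstrap token
              caCertHashes:
              - "sha256:<hex-encoded SPKI hash>"  # placeholder pin
              unsafeSkipCAVerification: false  # keep CA verification enabled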
type: boolean required: - token type: object file: - description: File is used to specify a file or URL to - a kubeconfig file from which to load cluster information + description: |- + File is used to specify a file or URL to a kubeconfig file from which to load cluster information BootstrapToken and File are mutually exclusive properties: kubeConfigPath: @@ -3269,26 +3327,25 @@ spec: description: Timeout modifies the discovery timeout type: string tlsBootstrapToken: - description: TLSBootstrapToken is a token used for TLS - bootstrapping. If .BootstrapToken is set, this field - is defaulted to .BootstrapToken.Token, but can be overridden. - If .File is set, this field **must be set** in case - the KubeConfigFile does not contain any other authentication - information + description: |- + TLSBootstrapToken is a token used for TLS bootstrapping. + If .BootstrapToken is set, this field is defaulted to .BootstrapToken.Token, but can be overridden. + If .File is set, this field **must be set** in case the KubeConfigFile does not contain any other authentication information type: string type: object kind: - description: 'Kind is a string value representing the REST - resource this object represents. Servers may infer this - from the endpoint the client submits requests to. Cannot - be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string nodeRegistration: - description: NodeRegistration holds fields that relate to - registering the new control-plane node to the cluster. When - used in the context of control plane nodes, NodeRegistration - should remain consistent across both InitConfiguration and - JoinConfiguration + description: |- + NodeRegistration holds fields that relate to registering the new control-plane node to the cluster. + When used in the context of control plane nodes, NodeRegistration should remain consistent + across both InitConfiguration and JoinConfiguration properties: criSocket: description: CRISocket is used to retrieve container runtime @@ -3303,12 +3360,12 @@ spec: type: string type: array imagePullPolicy: - description: ImagePullPolicy specifies the policy for - image pulling during kubeadm "init" and "join" operations. - The value of this field must be one of "Always", "IfNotPresent" - or "Never". Defaults to "IfNotPresent". This can be - used only with Kubernetes version equal to 1.22 and - later. + description: |- + ImagePullPolicy specifies the policy for image pulling + during kubeadm "init" and "join" operations. The value of + this field must be one of "Always", "IfNotPresent" or + "Never". Defaults to "IfNotPresent". This can be used only + with Kubernetes version equal to 1.22 and later. enum: - Always - IfNotPresent @@ -3317,50 +3374,41 @@ spec: kubeletExtraArgs: additionalProperties: type: string - description: KubeletExtraArgs passes through extra arguments - to the kubelet. The arguments here are passed to the - kubelet command line via the environment file kubeadm - writes at runtime for the kubelet to source. This overrides - the generic base-level configuration in the kubelet-config-1.X - ConfigMap Flags have higher priority when parsing. 
These - values are local and specific to the node kubeadm is - executing on. + description: |- + KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file + kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap + Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. type: object name: - description: Name is the `.Metadata.Name` field of the - Node API object that will be created in this `kubeadm - init` or `kubeadm join` operation. This field is also - used in the CommonName field of the kubelet's client - certificate to the API server. Defaults to the hostname - of the node if not provided. + description: |- + Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. + This field is also used in the CommonName field of the kubelet's client certificate to the API server. + Defaults to the hostname of the node if not provided. type: string taints: - description: 'Taints specifies the taints the Node API - object should be registered with. If this field is unset, - i.e. nil, in the `kubeadm init` process it will be defaulted - to []v1.Taint{''node-role.kubernetes.io/master=""''}. - If you don''t want to taint your control-plane node, - set this field to an empty slice, i.e. `taints: []` - in the YAML file. This field is solely used for Node - registration.' + description: |- + Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process + it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your control-plane node, set this field to an + empty slice, i.e. `taints: []` in the YAML file. This field is solely used for Node registration. items: - description: The node this Taint is attached to has - the "effect" on any pod that does not tolerate the - Taint. + description: |- + The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. properties: effect: - description: Required. The effect of the taint on - pods that do not tolerate the taint. Valid effects - are NoSchedule, PreferNoSchedule and NoExecute. + description: |- + Required. The effect of the taint on pods + that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. type: string key: description: Required. The taint key to be applied to a node. type: string timeAdded: - description: TimeAdded represents the time at which - the taint was added. It is only written for NoExecute - taints. + description: |- + TimeAdded represents the time at which the taint was added. + It is only written for NoExecute taints. format: date-time type: string value: @@ -3374,33 +3422,29 @@ spec: type: array type: object patches: - description: Patches contains options related to applying - patches to components deployed by kubeadm during "kubeadm - join". The minimum kubernetes version needed to support - Patches is v1.22 + description: |- + Patches contains options related to applying patches to components deployed by kubeadm during + "kubeadm join". The minimum kubernetes version needed to support Patches is v1.22 properties: directory: - description: Directory is a path to a directory that contains - files named "target[suffix][+patchtype].extension". 
- For example, "kube-apiserver0+merge.yaml" or just "etcd.json". - "target" can be one of "kube-apiserver", "kube-controller-manager", - "kube-scheduler", "etcd". "patchtype" can be one of - "strategic" "merge" or "json" and they match the patch - formats supported by kubectl. The default "patchtype" - is "strategic". "extension" must be either "json" or - "yaml". "suffix" is an optional string that can be used - to determine which patches are applied first alpha-numerically. - These files can be written into the target directory - via KubeadmConfig.Files which specifies additional files - to be created on the machine, either with content inline - or by referencing a secret. + description: |- + Directory is a path to a directory that contains files named "target[suffix][+patchtype].extension". + For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "target" can be one of + "kube-apiserver", "kube-controller-manager", "kube-scheduler", "etcd". "patchtype" can be one + of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. + The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". + "suffix" is an optional string that can be used to determine which patches are applied + first alpha-numerically. + These files can be written into the target directory via KubeadmConfig.Files which + specifies additional files to be created on the machine, either with content inline or + by referencing a secret. type: string type: object skipPhases: - description: SkipPhases is a list of phases to skip during - command execution. The list of phases can be obtained with - the "kubeadm init --help" command. This option takes effect - only on Kubernetes >=1.22.0. + description: |- + SkipPhases is a list of phases to skip during command execution. + The list of phases can be obtained with the "kubeadm init --help" command. + This option takes effect only on Kubernetes >=1.22.0. items: type: string type: array @@ -3439,16 +3483,24 @@ spec: type: string type: array useExperimentalRetryJoin: - description: "UseExperimentalRetryJoin replaces a basic kubeadm - command with a shell script with retries for joins. \n This - is meant to be an experimental temporary workaround on some - environments where joins fail due to timing (and other issues). - The long term goal is to add retries to kubeadm proper and use - that functionality. \n This will add about 40KB to userdata - \n For more information, refer to https://github.com/kubernetes-sigs/cluster-api/pull/2763#discussion_r397306055. - \n Deprecated: This experimental fix is no longer needed and - this field will be removed in a future release. When removing - also remove from staticcheck exclude-rules for SA1019 in golangci.yml" + description: |- + UseExperimentalRetryJoin replaces a basic kubeadm command with a shell + script with retries for joins. + + + This is meant to be an experimental temporary workaround on some environments + where joins fail due to timing (and other issues). The long term goal is to add retries to + kubeadm proper and use that functionality. + + + This will add about 40KB to userdata. + + + For more information, refer to https://github.com/kubernetes-sigs/cluster-api/pull/2763#discussion_r397306055. + + + Deprecated: This experimental fix is no longer needed and this field will be removed in a future release.
+ When removing also remove from staticcheck exclude-rules for SA1019 in golangci.yml type: boolean users: description: Users specifies extra users to add @@ -3526,94 +3578,103 @@ spec: type: object type: array verbosity: - description: Verbosity is the number for the kubeadm log level - verbosity. It overrides the `--v` flag in kubeadm commands. + description: |- + Verbosity is the number for the kubeadm log level verbosity. + It overrides the `--v` flag in kubeadm commands. format: int32 type: integer type: object machineTemplate: - description: MachineTemplate contains information about how machines + description: |- + MachineTemplate contains information about how machines should be shaped when creating or updating a control plane. properties: infrastructureRef: - description: InfrastructureRef is a required reference to a custom - resource offered by an infrastructure provider. + description: |- + InfrastructureRef is a required reference to a custom resource + offered by an infrastructure provider. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead - of an entire object, this string should contain a valid - JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part - of an object. TODO: this design is not final and this field - is subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key value map - stored with a resource that may be set by external tools - to store and retrieve arbitrary metadata. They are not queryable - and should be preserved when modifying objects. More info: - http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations type: object labels: additionalProperties: type: string - description: 'Map of string keys and values that can be used - to organize and categorize (scope and select) objects. May - match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object type: object nodeDeletionTimeout: - description: NodeDeletionTimeout defines how long the machine - controller will attempt to delete the Node that the Machine - hosts after the Machine is marked for deletion. A duration of - 0 will retry deletion indefinitely. If no value is provided, - the default value for this property of the Machine resource - will be used. + description: |- + NodeDeletionTimeout defines how long the machine controller will attempt to delete the Node that the Machine + hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. + If no value is provided, the default value for this property of the Machine resource will be used. type: string nodeDrainTimeout: - description: 'NodeDrainTimeout is the total amount of time that - the controller will spend on draining a controlplane node The - default value is 0, meaning that the node can be drained without - any time limitations. NOTE: NodeDrainTimeout is different from - `kubectl drain --timeout`' + description: |- + NodeDrainTimeout is the total amount of time that the controller will spend on draining a controlplane node. + The default value is 0, meaning that the node can be drained without any time limitations.
+ NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` type: string nodeVolumeDetachTimeout: - description: NodeVolumeDetachTimeout is the total amount of time - that the controller will spend on waiting for all volumes to - be detached. The default value is 0, meaning that the volumes - can be detached without any time limitations. + description: |- + NodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. type: string required: - infrastructureRef @@ -3624,65 +3685,75 @@ spec: properties: maxRetry: description: "MaxRetry is the Max number of retries while attempting - to remediate an unhealthy machine. A retry happens when a machine + to remediate an unhealthy machine.\nA retry happens when a machine that was created as a replacement for an unhealthy machine also - fails. For example, given a control plane with three machines - M1, M2, M3: \n M1 become unhealthy; remediation happens, and - M1-1 is created as a replacement. If M1-1 (replacement of M1) - has problems while bootstrapping it will become unhealthy, and - then be remediated; such operation is considered a retry, remediation-retry - #1. If M1-2 (replacement of M1-2) becomes unhealthy, remediation-retry - #2 will happen, etc. \n A retry could happen only after RetryPeriod - from the previous retry. If a machine is marked as unhealthy - after MinHealthyPeriod from the previous remediation expired, - this is not considered a retry anymore because the new issue - is assumed unrelated from the previous one. \n If not set, the - remedation will be retried infinitely." + fails.\nFor example, given a control plane with three machines + M1, M2, M3:\n\n\n\tM1 becomes unhealthy; remediation happens, + and M1-1 is created as a replacement.\n\tIf M1-1 (replacement + of M1) has problems while bootstrapping it will become unhealthy, + and then be\n\tremediated; such operation is considered a retry, + remediation-retry #1.\n\tIf M1-2 (replacement of M1-1) becomes + unhealthy, remediation-retry #2 will happen, etc.\n\n\nA retry + could happen only after RetryPeriod from the previous retry.\nIf + a machine is marked as unhealthy after MinHealthyPeriod from + the previous remediation expired,\nthis is not considered a + retry anymore because the new issue is assumed unrelated from + the previous one.\n\n\nIf not set, the remediation will be retried + infinitely." format: int32 type: integer minHealthyPeriod: description: "MinHealthyPeriod defines the duration after which - KCP will consider any failure to a machine unrelated from the + KCP will consider any failure to a machine unrelated\nfrom the previous one. In this case the remediation is not considered - a retry anymore, and thus the retry counter restarts from 0. - For example, assuming MinHealthyPeriod is set to 1h (default) - \n M1 become unhealthy; remediation happens, and M1-1 is created - as a replacement. If M1-1 (replacement of M1) has problems within - the 1hr after the creation, also this machine will be remediated - and this operation is considered a retry - a problem related - to the original issue happened to M1 -. \n If instead the problem + a retry anymore, and thus the retry\ncounter restarts from 0.
For example, assuming MinHealthyPeriod is set to 1h (default)\n\n\n\tM1 + becomes unhealthy; remediation happens, and M1-1 is created as + a replacement.\n\tIf M1-1 (replacement of M1) has problems within + the 1hr after the creation, also\n\tthis machine will be remediated + and this operation is considered a retry - a problem related\n\tto + the original issue that happened to M1 -.\n\n\n\tIf instead the problem + on M1-1 is happening after MinHealthyPeriod expired, e.g. four - days after m1-1 has been created as a remediation of M1, the - problem on M1-1 is considered unrelated to the original issue - happened to M1. \n If not set, this value is defaulted to 1h." + days after\n\tm1-1 has been created as a remediation of M1, + the problem on M1-1 is considered unrelated to\n\tthe original + issue that happened to M1.\n\n\nIf not set, this value is defaulted + to 1h." type: string retryPeriod: - description: "RetryPeriod is the duration that KCP should wait - before remediating a machine being created as a replacement - for an unhealthy machine (a retry). \n If not set, a retry will - happen immediately." + description: |- + RetryPeriod is the duration that KCP should wait before remediating a machine being created as a replacement + for an unhealthy machine (a retry). + + + If not set, a retry will happen immediately. type: string type: object replicas: - description: Number of desired machines. Defaults to 1. When stacked - etcd is used only odd numbers are permitted, as per [etcd best practice](https://etcd.io/docs/v3.3.12/faq/#why-an-odd-number-of-cluster-members). + description: |- + Number of desired machines. Defaults to 1. When stacked etcd is used only + odd numbers are permitted, as per [etcd best practice](https://etcd.io/docs/v3.3.12/faq/#why-an-odd-number-of-cluster-members). This is a pointer to distinguish between explicit zero and not specified. format: int32 type: integer rolloutAfter: - description: RolloutAfter is a field to indicate a rollout should - be performed after the specified time even if no changes have been - made to the KubeadmControlPlane. + description: |- + RolloutAfter is a field to indicate a rollout should be performed + after the specified time even if no changes have been made to the + KubeadmControlPlane. + Example: In the YAML the time can be specified in the RFC3339 format. + To specify the rolloutAfter target as March 9, 2023, at 9 am UTC + use "2023-03-09T09:00:00Z". format: date-time type: string rolloutBefore: - description: RolloutBefore is a field to indicate a rollout should - be performed if the specified criteria is met. + description: |- + RolloutBefore is a field to indicate a rollout should be performed + if the specified criteria are met. properties: certificatesExpiryDays: - description: CertificatesExpiryDays indicates a rollout needs - to be performed if the certificates of the machine will expire - within the specified days. + description: |- + CertificatesExpiryDays indicates a rollout needs to be performed if the + certificates of the machine will expire within the specified days. format: int32 type: integer type: object @@ -3691,37 +3762,43 @@ spec: rollingUpdate: maxSurge: 1 type: RollingUpdate - description: The RolloutStrategy to use to replace control plane machines - with new ones. + description: |- + The RolloutStrategy to use to replace control plane machines with + new ones. properties: rollingUpdate: - description: Rolling update config params. Present only if RolloutStrategyType - = RollingUpdate.
+ description: |- + Rolling update config params. Present only if + RolloutStrategyType = RollingUpdate. properties: maxSurge: anyOf: - type: integer - type: string - description: 'The maximum number of control planes that can - be scheduled above or under the desired number of control - planes. Value can be an absolute number 1 or 0. Defaults - to 1. Example: when this is set to 1, the control plane - can be scaled up immediately when the rolling update starts.' + description: |- + The maximum number of control planes that can be scheduled above or under the + desired number of control planes. + Value can be an absolute number 1 or 0. + Defaults to 1. + Example: when this is set to 1, the control plane can be scaled + up immediately when the rolling update starts. x-kubernetes-int-or-string: true type: object type: - description: Type of rollout. Currently the only supported strategy - is "RollingUpdate". Default is RollingUpdate. + description: |- + Type of rollout. Currently the only supported strategy is + "RollingUpdate". + Default is RollingUpdate. type: string type: object version: - description: 'Version defines the desired Kubernetes version. Please - note that if kubeadmConfigSpec.ClusterConfiguration.imageRepository - is not set we don''t allow upgrades to versions >= v1.22.0 for which - kubeadm uses the old registry (k8s.gcr.io). Please use a newer patch - version with the new registry instead. The default registries of - kubeadm are: * registry.k8s.io (new registry): >= v1.22.17, >= v1.23.15, - >= v1.24.9, >= v1.25.0 * k8s.gcr.io (old registry): all older versions' + description: |- + Version defines the desired Kubernetes version. + Please note that if kubeadmConfigSpec.ClusterConfiguration.imageRepository is not set + we don't allow upgrades to versions >= v1.22.0 for which kubeadm uses the old registry (k8s.gcr.io). + Please use a newer patch version with the new registry instead. The default registries of kubeadm are: + * registry.k8s.io (new registry): >= v1.22.17, >= v1.23.15, >= v1.24.9, >= v1.25.0 + * k8s.gcr.io (old registry): all older versions type: string required: - kubeadmConfigSpec @@ -3738,37 +3815,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. 
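As a rough sketch of how the rollout and remediation fields documented above compose in practice (all names and values here are illustrative assumptions, required fields such as kubeadmConfigSpec and machineTemplate are omitted for brevity, and the remediation block is assumed to map to spec.remediationStrategy):

  apiVersion: controlplane.cluster.x-k8s.io/v1beta1
  kind: KubeadmControlPlane
  metadata:
    name: example-kcp                      # hypothetical name
  spec:
    replicas: 3                            # odd count, per the etcd best-practice note above
    version: v1.28.0                       # assumed target version
    rolloutAfter: "2023-03-09T09:00:00Z"   # RFC3339, reusing the example above
    rolloutBefore:
      certificatesExpiryDays: 21           # assumed expiry threshold
    rolloutStrategy:
      type: RollingUpdate                  # the only supported strategy
      rollingUpdate:
        maxSurge: 1                        # the documented default
    remediationStrategy:
      maxRetry: 3                          # stop remediating after three retries
      retryPeriod: 10m                     # wait between retries
      minHealthyPeriod: 1h                 # the documented default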
type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - lastTransitionTime @@ -3777,17 +3854,20 @@ spec: type: object type: array failureMessage: - description: ErrorMessage indicates that there is a terminal problem - reconciling the state, and will be set to a descriptive error message. + description: |- + FailureMessage indicates that there is a terminal problem reconciling the + state, and will be set to a descriptive error message. type: string failureReason: - description: FailureReason indicates that there is a terminal problem - reconciling the state, and will be set to a token value suitable - for programmatic interpretation. + description: |- + FailureReason indicates that there is a terminal problem reconciling the + state, and will be set to a token value suitable for + programmatic interpretation. type: string initialized: - description: Initialized denotes whether or not the control plane - has the uploaded kubeadm-config configmap. + description: |- + Initialized denotes whether or not the control plane has the + uploaded kubeadm-config configmap. type: boolean lastRemediation: description: LastRemediation stores info about last remediation performed. @@ -3797,10 +3877,9 @@ spec: being remediated. type: string retryCount: - description: RetryCount used to keep track of remediation retry - for the last remediated machine. A retry happens when a machine - that was created as a replacement for an unhealthy machine also - fails. + description: |- + RetryCount used to keep track of remediation retry for the last remediated machine. + A retry happens when a machine that was created as a replacement for an unhealthy machine also fails. format: int32 type: integer timestamp: @@ -3819,8 +3898,12 @@ spec: format: int64 type: integer ready: - description: Ready denotes that the KubeadmControlPlane API Server - is ready to receive requests. + description: |- + Ready denotes that the KubeadmControlPlane API Server became ready during initial provisioning + to receive requests. + NOTE: this field is part of the Cluster API contract and it is used to orchestrate provisioning. + The value of this field is never updated after provisioning is completed. Please use conditions + to check the operational state of the control plane.
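Since the ready field above is frozen after initial provisioning, checking the operational state means reading conditions; a status fragment might look like the following sketch (the condition type and timestamps are illustrative assumptions):

  status:
    initialized: true                      # kubeadm-config ConfigMap was uploaded
    ready: true                            # set during provisioning, never updated afterwards
    conditions:
    - type: Available                      # assumed condition type
      status: "True"
      lastTransitionTime: "2023-03-09T09:05:00Z"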
type: boolean readyReplicas: description: Total number of fully running and ready control plane @@ -3828,33 +3911,38 @@ spec: format: int32 type: integer replicas: - description: Total number of non-terminated machines targeted by this - control plane (their labels match the selector). + description: |- + Total number of non-terminated machines targeted by this control plane + (their labels match the selector). format: int32 type: integer selector: - description: 'Selector is the label selector in string format to avoid - introspection by clients, and is used to provide the CRD-based integration - for the scale subresource and additional integrations for things - like kubectl describe.. The string will be in the same format as - the query-param syntax. More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors' + description: |- + Selector is the label selector in string format to avoid introspection + by clients, and is used to provide the CRD-based integration for the + scale subresource and additional integrations for things like kubectl + describe. The string will be in the same format as the query-param syntax. + More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors type: string unavailableReplicas: - description: Total number of unavailable machines targeted by this - control plane. This is the total number of machines that are still - required for the deployment to have 100% available capacity. They - may either be machines that are running but not yet ready or machines + description: |- + Total number of unavailable machines targeted by this control plane. + This is the total number of machines that are still required for + the deployment to have 100% available capacity. They may either + be machines that are running but not yet ready or machines that still have not been created. format: int32 type: integer updatedReplicas: - description: Total number of non-terminated machines targeted by this - control plane that have the desired template spec. + description: |- + Total number of non-terminated machines targeted by this control plane + that have the desired template spec. format: int32 type: integer version: - description: Version represents the minimum Kubernetes version for - the control plane machines in the cluster. + description: |- + Version represents the minimum Kubernetes version for the control plane machines + in the cluster.
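Because replicas and selector above feed the CRD scale subresource, standard tooling should be able to scale and inspect the control plane roughly as follows (the resource name is an assumption):

  kubectl scale kubeadmcontrolplane/example-kcp --replicas=5
  kubectl describe kubeadmcontrolplane/example-kcp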
type: string type: object type: object diff --git a/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanetemplates.yaml b/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanetemplates.yaml index 39c10a4e0f0e..44f0656ef654 100644 --- a/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanetemplates.yaml +++ b/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanetemplates.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.15.0 name: kubeadmcontrolplanetemplates.controlplane.cluster.x-k8s.io spec: group: controlplane.cluster.x-k8s.io @@ -22,21 +21,30 @@ spec: jsonPath: .metadata.creationTimestamp name: Age type: date + deprecated: true name: v1alpha4 schema: openAPIV3Schema: - description: "KubeadmControlPlaneTemplate is the Schema for the kubeadmcontrolplanetemplates - API. \n Deprecated: This type will be removed in one of the next releases." + description: |- + KubeadmControlPlaneTemplate is the Schema for the kubeadmcontrolplanetemplates API. + + + Deprecated: This type will be removed in one of the next releases. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -53,8 +61,9 @@ spec: of KubeadmControlPlane. properties: kubeadmConfigSpec: - description: KubeadmConfigSpec is a KubeadmConfigSpec to use - for initializing and joining machines to the control plane. + description: |- + KubeadmConfigSpec is a KubeadmConfigSpec + to use for initializing and joining machines to the control plane. properties: clusterConfiguration: description: ClusterConfiguration along with InitConfiguration @@ -73,23 +82,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags - to pass to the control plane component. TODO: - This is temporary and ideally we would like - to switch all components to use ComponentConfig - + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. 
+ TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements - describing volumes that are mounted from the + description: |- + HostPathMount contains elements describing volumes that are mounted from the host. properties: hostPath: - description: HostPath is the path in the - host that will be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside @@ -119,37 +128,34 @@ spec: type: string type: object apiVersion: - description: 'APIVersion defines the versioned schema - of this representation of an object. Servers should - convert recognized schemas to the latest internal - value, and may reject unrecognized values. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string certificatesDir: - description: 'CertificatesDir specifies where to store - or look for all required certificates. NB: if not - provided, this will default to `/etc/kubernetes/pki`' + description: |- + CertificatesDir specifies where to store or look for all required certificates. + NB: if not provided, this will default to `/etc/kubernetes/pki` type: string clusterName: description: The cluster name type: string controlPlaneEndpoint: - description: 'ControlPlaneEndpoint sets a stable IP - address or DNS name for the control plane; it can - be a valid IP address or a RFC-1123 DNS subdomain, - both with optional TCP port. In case the ControlPlaneEndpoint - is not specified, the AdvertiseAddress + BindPort - are used; in case the ControlPlaneEndpoint is specified - but without a TCP port, the BindPort is used. Possible - usages are: e.g. In a cluster with more than one - control plane instances, this field should be assigned - the address of the external load balancer in front - of the control plane instances. e.g. in environments - with enforced node recycling, the ControlPlaneEndpoint - could be used for assigning a stable DNS to the - control plane. NB: This value defaults to the first - value in the Cluster object status.apiEndpoints - array.' + description: |- + ControlPlaneEndpoint sets a stable IP address or DNS name for the control plane; it + can be a valid IP address or a RFC-1123 DNS subdomain, both with optional TCP port. + In case the ControlPlaneEndpoint is not specified, the AdvertiseAddress + BindPort + are used; in case the ControlPlaneEndpoint is specified but without a TCP port, + the BindPort is used. + Possible usages are: + e.g. In a cluster with more than one control plane instance, this field should be + assigned the address of the external load balancer in front of the + control plane instances. + e.g. in environments with enforced node recycling, the ControlPlaneEndpoint + could be used for assigning a stable DNS to the control plane. + NB: This value defaults to the first value in the Cluster object status.apiEndpoints array.
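A hedged sketch of how the ClusterConfiguration fields above fit together; the endpoint, flag, and host paths are assumptions, and certificatesDir simply restates the documented default:

  kubeadmConfigSpec:
    clusterConfiguration:
      controlPlaneEndpoint: "lb.example.com:6443"   # external load balancer, per the text above
      certificatesDir: /etc/kubernetes/pki          # the documented default
      apiServer:
        extraArgs:
          audit-log-maxage: "30"                    # assumed extra flag
        extraVolumes:
        - name: audit-logs                          # hypothetical HostPathMount
          hostPath: /var/log/kube-audit
          mountPath: /var/log/kube-audit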
type: string controllerManager: description: ControllerManager contains extra settings @@ -158,23 +164,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags - to pass to the control plane component. TODO: - This is temporary and ideally we would like - to switch all components to use ComponentConfig - + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements - describing volumes that are mounted from the + description: |- + HostPathMount contains elements describing volumes that are mounted from the host. properties: hostPath: - description: HostPath is the path in the - host that will be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside @@ -204,35 +210,34 @@ spec: installed in the cluster. properties: imageRepository: - description: ImageRepository sets the container - registry to pull images from. if not set, the - ImageRepository defined in ClusterConfiguration - will be used instead. + description: |- + ImageRepository sets the container registry to pull images from. + If not set, the ImageRepository defined in ClusterConfiguration will be used instead. type: string imageTag: - description: ImageTag allows to specify a tag - for the image. In case this value is set, kubeadm - does not change automatically the version of - the above components during upgrades. + description: |- + ImageTag allows specifying a tag for the image. + In case this value is set, kubeadm does not automatically change the version of the above components during upgrades. type: string type: object etcd: - description: 'Etcd holds configuration for etcd. NB: - This value defaults to a Local (stacked) etcd' + description: |- + Etcd holds configuration for etcd. + NB: This value defaults to a Local (stacked) etcd properties: external: - description: External describes how to connect - to an external etcd cluster Local and External - are mutually exclusive + description: |- + External describes how to connect to an external etcd cluster + Local and External are mutually exclusive properties: caFile: - description: CAFile is an SSL Certificate - Authority file used to secure etcd communication. + description: |- + CAFile is an SSL Certificate Authority file used to secure etcd communication. Required if using a TLS connection. type: string certFile: - description: CertFile is an SSL certification - file used to secure etcd communication. + description: |- + CertFile is an SSL certification file used to secure etcd communication. Required if using a TLS connection. type: string endpoints: @@ -242,9 +247,9 @@ spec: type: string type: array keyFile: - description: KeyFile is an SSL key file used - to secure etcd communication. Required if - using a TLS connection. + description: |- + KeyFile is an SSL key file used to secure etcd communication. + Required if using a TLS connection.
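The external etcd block just described might be filled in as in this sketch (endpoints and file paths are assumptions); it is mutually exclusive with the Local variant documented next:

  etcd:
    external:
      endpoints:
      - https://10.0.0.10:2379                      # assumed etcd member
      caFile: /etc/kubernetes/pki/etcd/ca.crt
      certFile: /etc/kubernetes/pki/etcd/client.crt
      keyFile: /etc/kubernetes/pki/etcd/client.key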
type: string required: - caFile - certFile - endpoints - keyFile type: object local: - description: Local provides configuration knobs - for configuring the local etcd instance Local - and External are mutually exclusive + description: |- + Local provides configuration knobs for configuring the local etcd instance + Local and External are mutually exclusive properties: dataDir: - description: DataDir is the directory etcd - will place its data. Defaults to "/var/lib/etcd". + description: |- + DataDir is the directory etcd will place its data. + Defaults to "/var/lib/etcd". type: string extraArgs: additionalProperties: type: string - description: ExtraArgs are extra arguments - provided to the etcd binary when run inside - a static pod. + description: |- + ExtraArgs are extra arguments provided to the etcd binary + when run inside a static pod. type: object imageRepository: - description: ImageRepository sets the container - registry to pull images from. if not set, - the ImageRepository defined in ClusterConfiguration - will be used instead. + description: |- + ImageRepository sets the container registry to pull images from. + If not set, the ImageRepository defined in ClusterConfiguration will be used instead. type: string imageTag: - description: ImageTag allows to specify a - tag for the image. In case this value is - set, kubeadm does not change automatically - the version of the above components during - upgrades. + description: |- + ImageTag allows specifying a tag for the image. + In case this value is set, kubeadm does not automatically change the version of the above components during upgrades. type: string peerCertSANs: description: PeerCertSANs sets extra Subject @@ -303,48 +306,45 @@ spec: description: FeatureGates enabled by the user. type: object imageRepository: - description: ImageRepository sets the container registry - to pull images from. If empty, `registry.k8s.io` - will be used by default; in case of kubernetes version - is a CI build (kubernetes version starts with `ci/` - or `ci-cross/`) `gcr.io/k8s-staging-ci-images` will - be used as a default for control plane components - and for kube-proxy, while `registry.k8s.io` will - be used for all the other images. + description: |- + ImageRepository sets the container registry to pull images from. + If empty, `registry.k8s.io` will be used by default; in case the Kubernetes version is a CI build (Kubernetes version starts with `ci/` or `ci-cross/`) + `gcr.io/k8s-staging-ci-images` will be used as a default for control plane components and for kube-proxy, while `registry.k8s.io` + will be used for all the other images. type: string kind: - description: 'Kind is a string value representing - the REST resource this object represents. Servers - may infer this from the endpoint the client submits - requests to. Cannot be updated. In CamelCase. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string kubernetesVersion: - description: 'KubernetesVersion is the target version - of the control plane. NB: This value defaults to - the Machine object spec.version' + description: |- + KubernetesVersion is the target version of the control plane.
+ NB: This value defaults to the Machine object spec.version type: string networking: - description: 'Networking holds configuration for the - networking topology of the cluster. NB: This value - defaults to the Cluster object spec.clusterNetwork.' + description: |- + Networking holds configuration for the networking topology of the cluster. + NB: This value defaults to the Cluster object spec.clusterNetwork. properties: dnsDomain: description: DNSDomain is the dns domain used by k8s services. Defaults to "cluster.local". type: string podSubnet: - description: PodSubnet is the subnet used by pods. - If unset, the API server will not allocate CIDR - ranges for every node. Defaults to a comma-delimited - string of the Cluster object's spec.clusterNetwork.services.cidrBlocks - if that is set + description: |- + PodSubnet is the subnet used by pods. + If unset, the API server will not allocate CIDR ranges for every node. + Defaults to a comma-delimited string of the Cluster object's spec.clusterNetwork.pods.cidrBlocks if that is set type: string serviceSubnet: - description: ServiceSubnet is the subnet used - by k8s services. Defaults to a comma-delimited - string of the Cluster object's spec.clusterNetwork.pods.cidrBlocks, - or to "10.96.0.0/12" if that's unset. + description: |- + ServiceSubnet is the subnet used by k8s services. + Defaults to a comma-delimited string of the Cluster object's spec.clusterNetwork.services.cidrBlocks, or + to "10.96.0.0/12" if that's unset. type: string type: object scheduler: @@ -354,23 +354,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags - to pass to the control plane component. TODO: - This is temporary and ideally we would like - to switch all components to use ComponentConfig - + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements - describing volumes that are mounted from the + description: |- + HostPathMount contains elements describing volumes that are mounted from the host. properties: hostPath: - description: HostPath is the path in the - host that will be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside @@ -427,10 +427,9 @@ spec: is used. type: string overwrite: - description: Overwrite defines whether or not - to overwrite any existing filesystem. If true, - any pre-existing file system will be destroyed. - Use with Caution. + description: |- + Overwrite defines whether or not to overwrite any existing filesystem. + If true, any pre-existing file system will be destroyed. Use with Caution. type: boolean partition: description: 'Partition specifies the partition @@ -439,11 +438,9 @@ spec: is the actual partition number.' type: string replaceFS: - description: 'ReplaceFS is a special directive, - used for Microsoft Azure that instructs cloud-init - to replace a file system of . NOTE: - unless you define a label, this requires the - use of the ''any'' partition directive.' + description: |- + ReplaceFS is a special directive, used for Microsoft Azure that instructs cloud-init to replace a file system of .
+ NOTE: unless you define a label, this requires the use of the 'any' partition directive. type: string required: - device @@ -462,23 +459,21 @@ spec: description: Device is the name of the device. type: string layout: - description: Layout specifies the device layout. - If it is true, a single partition will be - created for the entire device. When layout - is false, it means don't partition or ignore - existing partitioning. + description: |- + Layout specifies the device layout. + If it is true, a single partition will be created for the entire device. + When layout is false, it means don't partition or ignore existing partitioning. type: boolean overwrite: - description: Overwrite describes whether to - skip checks and create the partition if a - partition or filesystem is found on the device. + description: |- + Overwrite describes whether to skip checks and create the partition if a partition or filesystem is found on the device. Use with caution. Default is 'false'. type: boolean tableType: - description: 'TableType specifies the tupe of - partition table. The following are supported: - ''mbr'': default and setups a MS-DOS partition - table ''gpt'': setups a GPT partition table' + description: |- + TableType specifies the type of partition table. The following are supported: + 'mbr': default and sets up an MS-DOS partition table + 'gpt': sets up a GPT partition table type: string required: - device @@ -555,57 +550,52 @@ spec: are the configurations necessary for the init command properties: apiVersion: - description: 'APIVersion defines the versioned schema - of this representation of an object. Servers should - convert recognized schemas to the latest internal - value, and may reject unrecognized values. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string bootstrapTokens: - description: BootstrapTokens is respected at `kubeadm - init` time and describes a set of Bootstrap Tokens - to create. This information IS NOT uploaded to the - kubeadm cluster configmap, partly because of its - sensitive nature + description: |- + BootstrapTokens is respected at `kubeadm init` time and describes a set of Bootstrap Tokens to create. + This information IS NOT uploaded to the kubeadm cluster configmap, partly because of its sensitive nature items: description: BootstrapToken describes one bootstrap token, stored as a Secret in the cluster. properties: description: - description: Description sets a human-friendly - message why this token exists and what it's - used for, so other administrators can know - its purpose. + description: |- + Description sets a human-friendly message why this token exists and what it's used + for, so other administrators can know its purpose. type: string expires: - description: Expires specifies the timestamp - when this token expires. Defaults to being - set dynamically at runtime based on the TTL. - Expires and TTL are mutually exclusive. + description: |- + Expires specifies the timestamp when this token expires. Defaults to being set + dynamically at runtime based on the TTL. Expires and TTL are mutually exclusive.
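A bootstrap token entry honoring the constraint above (set ttl or expires, never both) might look like the following sketch; the token value follows the usual kubeadm placeholder shape and is not a real secret, and the usages and groups values are assumed defaults:

  initConfiguration:
    bootstrapTokens:
    - token: "abcdef.0123456789abcdef"     # placeholder format
      description: "token for joining the first nodes"
      ttl: 24h                             # the documented default; omit expires
      usages:
      - signing
      - authentication
      groups:
      - system:bootstrappers:kubeadm:default-node-token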
format: date-time type: string groups: - description: Groups specifies the extra groups - that this token will authenticate as when/if + description: |- + Groups specifies the extra groups that this token will authenticate as when/if used for authentication items: type: string type: array token: - description: Token is used for establishing - bidirectional trust between nodes and control-planes. + description: |- + Token is used for establishing bidirectional trust between nodes and control-planes. Used for joining nodes in the cluster. type: string ttl: - description: TTL defines the time to live for - this token. Defaults to 24h. Expires and TTL - are mutually exclusive. + description: |- + TTL defines the time to live for this token. Defaults to 24h. + Expires and TTL are mutually exclusive. type: string usages: - description: Usages describes the ways in which - this token can be used. Can by default be - used for establishing bidirectional trust, - but that can be changed here. + description: |- + Usages describes the ways in which this token can be used. Can by default be used + for establishing bidirectional trust, but that can be changed here. items: type: string type: array @@ -614,42 +604,37 @@ spec: type: object type: array kind: - description: 'Kind is a string value representing - the REST resource this object represents. Servers - may infer this from the endpoint the client submits - requests to. Cannot be updated. In CamelCase. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string localAPIEndpoint: - description: LocalAPIEndpoint represents the endpoint - of the API server instance that's deployed on this - control plane node In HA setups, this differs from - ClusterConfiguration.ControlPlaneEndpoint in the - sense that ControlPlaneEndpoint is the global endpoint - for the cluster, which then loadbalances the requests - to each individual API server. This configuration - object lets you customize what IP/DNS name and port - the local API server advertises it's accessible - on. By default, kubeadm tries to auto-detect the - IP of the default interface and use that, but in - case that process fails you may set the desired - value here. + description: |- + LocalAPIEndpoint represents the endpoint of the API server instance that's deployed on this control plane node + In HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint in the sense that ControlPlaneEndpoint + is the global endpoint for the cluster, which then loadbalances the requests to each individual API server. This + configuration object lets you customize what IP/DNS name and port the local API server advertises it's accessible + on. By default, kubeadm tries to auto-detect the IP of the default interface and use that, but in case that process + fails you may set the desired value here. properties: advertiseAddress: description: AdvertiseAddress sets the IP address for the API server to advertise. type: string bindPort: - description: BindPort sets the secure port for - the API Server to bind to. Defaults to 6443. + description: |- + BindPort sets the secure port for the API Server to bind to. + Defaults to 6443. 
format: int32 type: integer type: object nodeRegistration: - description: NodeRegistration holds fields that relate - to registering the new control-plane node to the - cluster. When used in the context of control plane - nodes, NodeRegistration should remain consistent + description: |- + NodeRegistration holds fields that relate to registering the new control-plane node to the cluster. + When used in the context of control plane nodes, NodeRegistration should remain consistent across both InitConfiguration and JoinConfiguration properties: criSocket: @@ -667,53 +652,41 @@ spec: kubeletExtraArgs: additionalProperties: type: string - description: KubeletExtraArgs passes through extra - arguments to the kubelet. The arguments here - are passed to the kubelet command line via the - environment file kubeadm writes at runtime for - the kubelet to source. This overrides the generic - base-level configuration in the kubelet-config-1.X - ConfigMap Flags have higher priority when parsing. - These values are local and specific to the node - kubeadm is executing on. + description: |- + KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file + kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap. + Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. type: object name: - description: Name is the `.Metadata.Name` field - of the Node API object that will be created - in this `kubeadm init` or `kubeadm join` operation. - This field is also used in the CommonName field - of the kubelet's client certificate to the API - server. Defaults to the hostname of the node - if not provided. + description: |- + Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. + This field is also used in the CommonName field of the kubelet's client certificate to the API server. + Defaults to the hostname of the node if not provided. type: string taints: - description: 'Taints specifies the taints the - Node API object should be registered with. If - this field is unset, i.e. nil, in the `kubeadm - init` process it will be defaulted to []v1.Taint{''node-role.kubernetes.io/master=""''}. - If you don''t want to taint your control-plane - node, set this field to an empty slice, i.e. - `taints: {}` in the YAML file. This field is - solely used for Node registration.' + description: |- + Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process + it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your control-plane node, set this field to an + empty slice, i.e. `taints: []` in the YAML file. This field is solely used for Node registration. items: - description: The node this Taint is attached - to has the "effect" on any pod that does not - tolerate the Taint. + description: |- + The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. properties: effect: - description: Required. The effect of the - taint on pods that do not tolerate the - taint. Valid effects are NoSchedule, PreferNoSchedule - and NoExecute. + description: |- + Required. The effect of the taint on pods + that do not tolerate the taint.
+ Valid effects are NoSchedule, PreferNoSchedule and NoExecute. type: string key: description: Required. The taint key to be applied to a node. type: string timeAdded: - description: TimeAdded represents the time - at which the taint was added. It is only - written for NoExecute taints. + description: |- + TimeAdded represents the time at which the taint was added. + It is only written for NoExecute taints. format: date-time type: string value: @@ -732,23 +705,23 @@ spec: for the join command properties: apiVersion: - description: 'APIVersion defines the versioned schema - of this representation of an object. Servers should - convert recognized schemas to the latest internal - value, and may reject unrecognized values. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string caCertPath: - description: 'CACertPath is the path to the SSL certificate - authority used to secure comunications between node - and control-plane. Defaults to "/etc/kubernetes/pki/ca.crt". - TODO: revisit when there is defaulting from k/k' + description: |- + CACertPath is the path to the SSL certificate authority used to + secure communications between node and control-plane. + Defaults to "/etc/kubernetes/pki/ca.crt". + TODO: revisit when there is defaulting from k/k type: string controlPlane: - description: ControlPlane defines the additional control - plane instance to be deployed on the joining node. - If nil, no additional control plane instance will - be deployed. + description: |- + ControlPlane defines the additional control plane instance to be deployed on the joining node. + If nil, no additional control plane instance will be deployed. properties: localAPIEndpoint: description: LocalAPIEndpoint represents the endpoint @@ -760,21 +733,21 @@ spec: address for the API server to advertise. type: string bindPort: - description: BindPort sets the secure port - for the API Server to bind to. Defaults - to 6443. + description: |- + BindPort sets the secure port for the API Server to bind to. + Defaults to 6443. format: int32 type: integer type: object type: object discovery: - description: 'Discovery specifies the options for - the kubelet to use during the TLS Bootstrap process - TODO: revisit when there is defaulting from k/k' + description: |- + Discovery specifies the options for the kubelet to use during the TLS Bootstrap process + TODO: revisit when there is defaulting from k/k properties: bootstrapToken: - description: BootstrapToken is used to set the - options for bootstrap token based discovery + description: |- + BootstrapToken is used to set the options for bootstrap token based discovery BootstrapToken and File are mutually exclusive properties: apiServerEndpoint: @@ -783,43 +756,36 @@ spec: info will be fetched. type: string caCertHashes: - description: 'CACertHashes specifies a set - of public key pins to verify when token-based - discovery is used. The root CA found during - discovery must match one of these values. - Specifying an empty set disables root CA - pinning, which can be unsafe. Each hash - is specified as ":", where - the only currently supported type is "sha256".
- This is a hex-encoded SHA-256 hash of the - Subject Public Key Info (SPKI) object in - DER-encoded ASN.1. These hashes can be calculated - using, for example, OpenSSL: openssl x509 - -pubkey -in ca.crt openssl rsa -pubin -outform - der 2>&/dev/null | openssl dgst -sha256 - -hex' + description: |- + CACertHashes specifies a set of public key pins to verify + when token-based discovery is used. The root CA found during discovery + must match one of these values. Specifying an empty set disables root CA + pinning, which can be unsafe. Each hash is specified as "<type>:<value>", + where the only currently supported type is "sha256". This is a hex-encoded + SHA-256 hash of the Subject Public Key Info (SPKI) object in DER-encoded + ASN.1. These hashes can be calculated using, for example, OpenSSL: + openssl x509 -pubkey -in ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex items: type: string type: array token: - description: Token is a token used to validate - cluster information fetched from the control-plane. + description: |- + Token is a token used to validate cluster information + fetched from the control-plane. type: string unsafeSkipCAVerification: - description: UnsafeSkipCAVerification allows - token-based discovery without CA verification - via CACertHashes. This can weaken the security - of kubeadm since other nodes can impersonate - the control-plane. + description: |- + UnsafeSkipCAVerification allows token-based discovery + without CA verification via CACertHashes. This can weaken + the security of kubeadm since other nodes can impersonate the control-plane. type: boolean required: - token type: object file: - description: File is used to specify a file or - URL to a kubeconfig file from which to load - cluster information BootstrapToken and File - are mutually exclusive + description: |- + File is used to specify a file or URL to a kubeconfig file from which to load cluster information + BootstrapToken and File are mutually exclusive properties: kubeConfigPath: description: KubeConfigPath is used to specify @@ -833,26 +799,24 @@ spec: description: Timeout modifies the discovery timeout type: string tlsBootstrapToken: - description: TLSBootstrapToken is a token used - for TLS bootstrapping. If .BootstrapToken is - set, this field is defaulted to .BootstrapToken.Token, - but can be overridden. If .File is set, this - field **must be set** in case the KubeConfigFile - does not contain any other authentication information + description: |- + TLSBootstrapToken is a token used for TLS bootstrapping. + If .BootstrapToken is set, this field is defaulted to .BootstrapToken.Token, but can be overridden. + If .File is set, this field **must be set** in case the KubeConfigFile does not contain any other authentication information type: string type: object kind: - description: 'Kind is a string value representing - the REST resource this object represents. Servers - may infer this from the endpoint the client submits - requests to. Cannot be updated. In CamelCase. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase.
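For reference, a minimal bootstrap-token discovery stanza accepted by the schema above might look as follows; the endpoint, token, and hash values are placeholders, with the hash produced by the OpenSSL pipeline quoted in the caCertHashes description:

```yaml
# Illustrative values only; not part of the diff above.
discovery:
  bootstrapToken:
    apiServerEndpoint: "10.0.0.10:6443"
    token: "abcdef.0123456789abcdef"               # [a-z0-9]{6}.[a-z0-9]{16}
    caCertHashes:
      - "sha256:<hex-encoded SPKI hash of ca.crt>" # from the openssl command above
    unsafeSkipCAVerification: false
```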
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string nodeRegistration: - description: NodeRegistration holds fields that relate - to registering the new control-plane node to the - cluster. When used in the context of control plane - nodes, NodeRegistration should remain consistent + description: |- + NodeRegistration holds fields that relate to registering the new control-plane node to the cluster. + When used in the context of control plane nodes, NodeRegistration should remain consistent across both InitConfiguration and JoinConfiguration properties: criSocket: @@ -870,53 +834,41 @@ spec: kubeletExtraArgs: additionalProperties: type: string - description: KubeletExtraArgs passes through extra - arguments to the kubelet. The arguments here - are passed to the kubelet command line via the - environment file kubeadm writes at runtime for - the kubelet to source. This overrides the generic - base-level configuration in the kubelet-config-1.X - ConfigMap Flags have higher priority when parsing. - These values are local and specific to the node - kubeadm is executing on. + description: |- + KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file + kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap + Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. type: object name: - description: Name is the `.Metadata.Name` field - of the Node API object that will be created - in this `kubeadm init` or `kubeadm join` operation. - This field is also used in the CommonName field - of the kubelet's client certificate to the API - server. Defaults to the hostname of the node - if not provided. + description: |- + Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. + This field is also used in the CommonName field of the kubelet's client certificate to the API server. + Defaults to the hostname of the node if not provided. type: string taints: - description: 'Taints specifies the taints the - Node API object should be registered with. If - this field is unset, i.e. nil, in the `kubeadm - init` process it will be defaulted to []v1.Taint{''node-role.kubernetes.io/master=""''}. - If you don''t want to taint your control-plane - node, set this field to an empty slice, i.e. - `taints: {}` in the YAML file. This field is - solely used for Node registration.' + description: |- + Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process + it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your control-plane node, set this field to an + empty slice, i.e. `taints: []` in the YAML file. This field is solely used for Node registration. items: - description: The node this Taint is attached - to has the "effect" on any pod that does not - tolerate the Taint. + description: |- + The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. properties: effect: - description: Required. The effect of the - taint on pods that do not tolerate the - taint. Valid effects are NoSchedule, PreferNoSchedule - and NoExecute. + description: |- + Required.
The effect of the taint on pods + that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. type: string key: description: Required. The taint key to be applied to a node. type: string timeAdded: - description: TimeAdded represents the time - at which the taint was added. It is only - written for NoExecute taints. + description: |- + TimeAdded represents the time at which the taint was added. + It is only written for NoExecute taints. format: date-time type: string value: @@ -967,14 +919,20 @@ spec: type: string type: array useExperimentalRetryJoin: - description: "UseExperimentalRetryJoin replaces a basic - kubeadm command with a shell script with retries for - joins. \n This is meant to be an experimental temporary - workaround on some environments where joins fail due - to timing (and other issues). The long term goal is - to add retries to kubeadm proper and use that functionality. - \n This will add about 40KB to userdata \n For more - information, refer to https://github.com/kubernetes-sigs/cluster-api/pull/2763#discussion_r397306055." + description: |- + UseExperimentalRetryJoin replaces a basic kubeadm command with a shell + script with retries for joins. + + + This is meant to be an experimental temporary workaround on some environments + where joins fail due to timing (and other issues). The long term goal is to add retries to + kubeadm proper and use that functionality. + + + This will add about 40KB to userdata + + + For more information, refer to https://github.com/kubernetes-sigs/cluster-api/pull/2763#discussion_r397306055. type: boolean users: description: Users specifies extra users to add @@ -1031,102 +989,108 @@ spec: type: object type: array verbosity: - description: Verbosity is the number for the kubeadm log - level verbosity. It overrides the `--v` flag in kubeadm - commands. + description: |- + Verbosity is the number for the kubeadm log level verbosity. + It overrides the `--v` flag in kubeadm commands. format: int32 type: integer type: object machineTemplate: - description: MachineTemplate contains information about how - machines should be shaped when creating or updating a control - plane. + description: |- + MachineTemplate contains information about how machines + should be shaped when creating or updating a control plane. properties: infrastructureRef: - description: InfrastructureRef is a required reference - to a custom resource offered by an infrastructure provider. + description: |- + InfrastructureRef is a required reference to a custom resource + offered by an infrastructure provider. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object - instead of an entire object, this string should - contain a valid JSON/Go field access statement, - such as desiredState.manifest.containers[2]. For - example, if the object reference is to a container - within a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container - that triggered the event) or if no container name - is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only - to have some well-defined way of referencing a part - of an object. TODO: this design is not final and - this field is subject to change in the future.' 
+ description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this - reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object x-kubernetes-map-type: atomic metadata: - description: 'Standard object''s metadata. More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key value - map stored with a resource that may be set by external - tools to store and retrieve arbitrary metadata. - They are not queryable and should be preserved when - modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations type: object labels: additionalProperties: type: string - description: 'Map of string keys and values that can - be used to organize and categorize (scope and select) - objects. 
May match selectors of replication controllers - and services. More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object type: object nodeDrainTimeout: - description: 'NodeDrainTimeout is the total amount of - time that the controller will spend on draining a controlplane - node The default value is 0, meaning that the node can - be drained without any time limitations. NOTE: NodeDrainTimeout - is different from `kubectl drain --timeout`' + description: |- + NodeDrainTimeout is the total amount of time that the controller will spend on draining a controlplane node + The default value is 0, meaning that the node can be drained without any time limitations. + NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` type: string required: - infrastructureRef type: object replicas: - description: Number of desired machines. Defaults to 1. When - stacked etcd is used only odd numbers are permitted, as - per [etcd best practice](https://etcd.io/docs/v3.3.12/faq/#why-an-odd-number-of-cluster-members). - This is a pointer to distinguish between explicit zero and - not specified. + description: |- + Number of desired machines. Defaults to 1. When stacked etcd is used only + odd numbers are permitted, as per [etcd best practice](https://etcd.io/docs/v3.3.12/faq/#why-an-odd-number-of-cluster-members). + This is a pointer to distinguish between explicit zero and not specified. format: int32 type: integer rolloutAfter: - description: RolloutAfter is a field to indicate a rollout - should be performed after the specified time even if no - changes have been made to the KubeadmControlPlane. + description: |- + RolloutAfter is a field to indicate a rollout should be performed + after the specified time even if no changes have been made to the + KubeadmControlPlane. format: date-time type: string rolloutStrategy: @@ -1134,28 +1098,33 @@ spec: rollingUpdate: maxSurge: 1 type: RollingUpdate - description: The RolloutStrategy to use to replace control - plane machines with new ones. + description: |- + The RolloutStrategy to use to replace control plane machines with + new ones. properties: rollingUpdate: - description: Rolling update config params. Present only - if RolloutStrategyType = RollingUpdate. + description: |- + Rolling update config params. Present only if + RolloutStrategyType = RollingUpdate. properties: maxSurge: anyOf: - type: integer - type: string - description: 'The maximum number of control planes - that can be scheduled above or under the desired - number of control planes. Value can be an absolute - number 1 or 0. Defaults to 1. Example: when this - is set to 1, the control plane can be scaled up - immediately when the rolling update starts.' + description: |- + The maximum number of control planes that can be scheduled above or under the + desired number of control planes. + Value can be an absolute number 1 or 0. + Defaults to 1. + Example: when this is set to 1, the control plane can be scaled + up immediately when the rolling update starts. x-kubernetes-int-or-string: true type: object type: - description: Type of rollout. Currently the only supported - strategy is "RollingUpdate". Default is RollingUpdate. + description: |- + Type of rollout. Currently the only supported strategy is + "RollingUpdate". 
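Taken together, the machineTemplate, replicas, nodeDrainTimeout, and rollout fields described above compose roughly as in this hypothetical KubeadmControlPlane fragment (provider kind, names, and values are illustrative):

```yaml
spec:
  replicas: 3                       # odd count, per the etcd best practice noted above
  machineTemplate:
    infrastructureRef:
      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
      kind: DockerMachineTemplate   # placeholder infrastructure provider template
      name: example-control-plane
    nodeDrainTimeout: 5m
  rolloutStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1                   # allow one extra machine during the rolling update
```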
+ Default is RollingUpdate. type: string type: object version: @@ -1173,7 +1142,7 @@ spec: - template type: object type: object - served: true + served: false storage: false subresources: {} - additionalPrinterColumns: @@ -1188,14 +1157,19 @@ spec: API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -1208,37 +1182,41 @@ spec: needed to create a KubeadmControlPlane from a template. properties: metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key value map - stored with a resource that may be set by external tools - to store and retrieve arbitrary metadata. They are not queryable - and should be preserved when modifying objects. More info: - http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations type: object labels: additionalProperties: type: string - description: 'Map of string keys and values that can be used - to organize and categorize (scope and select) objects. May - match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object type: object spec: - description: 'KubeadmControlPlaneTemplateResourceSpec defines - the desired state of KubeadmControlPlane. NOTE: KubeadmControlPlaneTemplateResourceSpec - is similar to KubeadmControlPlaneSpec but omits Replicas and - Version fields. 
These fields do not make sense on the KubeadmControlPlaneTemplate, - because they are calculated by the Cluster topology reconciler - during reconciliation and thus cannot be configured on the KubeadmControlPlaneTemplate.' + description: |- + KubeadmControlPlaneTemplateResourceSpec defines the desired state of KubeadmControlPlane. + NOTE: KubeadmControlPlaneTemplateResourceSpec is similar to KubeadmControlPlaneSpec but + omits Replicas and Version fields. These fields do not make sense on the KubeadmControlPlaneTemplate, + because they are calculated by the Cluster topology reconciler during reconciliation and thus cannot + be configured on the KubeadmControlPlaneTemplate. properties: kubeadmConfigSpec: - description: KubeadmConfigSpec is a KubeadmConfigSpec to use - for initializing and joining machines to the control plane. + description: |- + KubeadmConfigSpec is a KubeadmConfigSpec + to use for initializing and joining machines to the control plane. properties: clusterConfiguration: description: ClusterConfiguration along with InitConfiguration @@ -1257,23 +1235,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags - to pass to the control plane component. TODO: - This is temporary and ideally we would like - to switch all components to use ComponentConfig - + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements - describing volumes that are mounted from the + description: |- + HostPathMount contains elements describing volumes that are mounted from the host. properties: hostPath: - description: HostPath is the path in the - host that will be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside @@ -1303,37 +1281,34 @@ spec: type: string type: object apiVersion: - description: 'APIVersion defines the versioned schema - of this representation of an object. Servers should - convert recognized schemas to the latest internal - value, and may reject unrecognized values. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string certificatesDir: - description: 'CertificatesDir specifies where to store - or look for all required certificates. NB: if not - provided, this will default to `/etc/kubernetes/pki`' + description: |- + CertificatesDir specifies where to store or look for all required certificates. + NB: if not provided, this will default to `/etc/kubernetes/pki` type: string clusterName: description: The cluster name type: string controlPlaneEndpoint: - description: 'ControlPlaneEndpoint sets a stable IP - address or DNS name for the control plane; it can - be a valid IP address or a RFC-1123 DNS subdomain, - both with optional TCP port. 
In case the ControlPlaneEndpoint - is not specified, the AdvertiseAddress + BindPort - are used; in case the ControlPlaneEndpoint is specified - but without a TCP port, the BindPort is used. Possible - usages are: e.g. In a cluster with more than one - control plane instances, this field should be assigned - the address of the external load balancer in front - of the control plane instances. e.g. in environments - with enforced node recycling, the ControlPlaneEndpoint - could be used for assigning a stable DNS to the - control plane. NB: This value defaults to the first - value in the Cluster object status.apiEndpoints - array.' + description: |- + ControlPlaneEndpoint sets a stable IP address or DNS name for the control plane; it + can be a valid IP address or a RFC-1123 DNS subdomain, both with optional TCP port. + In case the ControlPlaneEndpoint is not specified, the AdvertiseAddress + BindPort + are used; in case the ControlPlaneEndpoint is specified but without a TCP port, + the BindPort is used. + Possible usages are: + e.g. In a cluster with more than one control plane instances, this field should be + assigned the address of the external load balancer in front of the + control plane instances. + e.g. in environments with enforced node recycling, the ControlPlaneEndpoint + could be used for assigning a stable DNS to the control plane. + NB: This value defaults to the first value in the Cluster object status.apiEndpoints array. type: string controllerManager: description: ControllerManager contains extra settings @@ -1342,23 +1317,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags - to pass to the control plane component. TODO: - This is temporary and ideally we would like - to switch all components to use ComponentConfig - + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements - describing volumes that are mounted from the + description: |- + HostPathMount contains elements describing volumes that are mounted from the host. properties: hostPath: - description: HostPath is the path in the - host that will be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside @@ -1388,35 +1363,34 @@ spec: installed in the cluster. properties: imageRepository: - description: ImageRepository sets the container - registry to pull images from. if not set, the - ImageRepository defined in ClusterConfiguration - will be used instead. + description: |- + ImageRepository sets the container registry to pull images from. + if not set, the ImageRepository defined in ClusterConfiguration will be used instead. type: string imageTag: - description: ImageTag allows to specify a tag - for the image. In case this value is set, kubeadm - does not change automatically the version of - the above components during upgrades. + description: |- + ImageTag allows to specify a tag for the image. + In case this value is set, kubeadm does not change automatically the version of the above components during upgrades. type: string type: object etcd: - description: 'Etcd holds configuration for etcd. 
NB: - This value defaults to a Local (stacked) etcd' + description: |- + Etcd holds configuration for etcd. + NB: This value defaults to a Local (stacked) etcd properties: external: - description: External describes how to connect - to an external etcd cluster Local and External - are mutually exclusive + description: |- + External describes how to connect to an external etcd cluster + Local and External are mutually exclusive properties: caFile: - description: CAFile is an SSL Certificate - Authority file used to secure etcd communication. + description: |- + CAFile is an SSL Certificate Authority file used to secure etcd communication. Required if using a TLS connection. type: string certFile: - description: CertFile is an SSL certification - file used to secure etcd communication. + description: |- + CertFile is an SSL certification file used to secure etcd communication. Required if using a TLS connection. type: string endpoints: @@ -1426,9 +1400,9 @@ spec: type: string type: array keyFile: - description: KeyFile is an SSL key file used - to secure etcd communication. Required if - using a TLS connection. + description: |- + KeyFile is an SSL key file used to secure etcd communication. + Required if using a TLS connection. type: string required: - caFile @@ -1437,33 +1411,31 @@ spec: - keyFile type: object local: - description: Local provides configuration knobs - for configuring the local etcd instance Local - and External are mutually exclusive + description: |- + Local provides configuration knobs for configuring the local etcd instance + Local and External are mutually exclusive properties: dataDir: - description: DataDir is the directory etcd - will place its data. Defaults to "/var/lib/etcd". + description: |- + DataDir is the directory etcd will place its data. + Defaults to "/var/lib/etcd". type: string extraArgs: additionalProperties: type: string - description: ExtraArgs are extra arguments - provided to the etcd binary when run inside - a static pod. + description: |- + ExtraArgs are extra arguments provided to the etcd binary + when run inside a static pod. type: object imageRepository: - description: ImageRepository sets the container - registry to pull images from. if not set, - the ImageRepository defined in ClusterConfiguration - will be used instead. + description: |- + ImageRepository sets the container registry to pull images from. + if not set, the ImageRepository defined in ClusterConfiguration will be used instead. type: string imageTag: - description: ImageTag allows to specify a - tag for the image. In case this value is - set, kubeadm does not change automatically - the version of the above components during - upgrades. + description: |- + ImageTag allows to specify a tag for the image. + In case this value is set, kubeadm does not change automatically the version of the above components during upgrades. type: string peerCertSANs: description: PeerCertSANs sets extra Subject @@ -1487,55 +1459,52 @@ spec: description: FeatureGates enabled by the user. type: object imageRepository: - description: 'ImageRepository sets the container registry - to pull images from. * If not set, the default registry - of kubeadm will be used, i.e. * registry.k8s.io - (new registry): >= v1.22.17, >= v1.23.15, >= v1.24.9, - >= v1.25.0 * k8s.gcr.io (old registry): all older - versions Please note that when imageRepository is - not set we don''t allow upgrades to versions >= - v1.22.0 which use the old registry (k8s.gcr.io). 
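As a sketch of the mutually exclusive local/external etcd choice described above (paths and endpoints are placeholders):

```yaml
# Exactly one of local or external may be set.
etcd:
  local:
    dataDir: /var/lib/etcd
    extraArgs:
      quota-backend-bytes: "8589934592"
# or, for an externally managed cluster:
# etcd:
#   external:
#     endpoints: ["https://10.0.0.11:2379"]
#     caFile: /etc/etcd/ca.crt
#     certFile: /etc/etcd/client.crt
#     keyFile: /etc/etcd/client.key
```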
- Please use a newer patch version with the new registry - instead (i.e. >= v1.22.17, >= v1.23.15, >= v1.24.9, - >= v1.25.0). * If the version is a CI build (kubernetes - version starts with `ci/` or `ci-cross/`) `gcr.io/k8s-staging-ci-images` - will be used as a default for control plane components - and for kube-proxy, while `registry.k8s.io` will - be used for all the other images.' + description: |- + ImageRepository sets the container registry to pull images from. + * If not set, the default registry of kubeadm will be used, i.e. + * registry.k8s.io (new registry): >= v1.22.17, >= v1.23.15, >= v1.24.9, >= v1.25.0 + * k8s.gcr.io (old registry): all older versions + Please note that when imageRepository is not set we don't allow upgrades to + versions >= v1.22.0 which use the old registry (k8s.gcr.io). Please use + a newer patch version with the new registry instead (i.e. >= v1.22.17, + >= v1.23.15, >= v1.24.9, >= v1.25.0). + * If the version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`) + `gcr.io/k8s-staging-ci-images` will be used as a default for control plane components + and for kube-proxy, while `registry.k8s.io` will be used for all the other images. type: string kind: - description: 'Kind is a string value representing - the REST resource this object represents. Servers - may infer this from the endpoint the client submits - requests to. Cannot be updated. In CamelCase. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string kubernetesVersion: - description: 'KubernetesVersion is the target version - of the control plane. NB: This value defaults to - the Machine object spec.version' + description: |- + KubernetesVersion is the target version of the control plane. + NB: This value defaults to the Machine object spec.version type: string networking: - description: 'Networking holds configuration for the - networking topology of the cluster. NB: This value - defaults to the Cluster object spec.clusterNetwork.' + description: |- + Networking holds configuration for the networking topology of the cluster. + NB: This value defaults to the Cluster object spec.clusterNetwork. properties: dnsDomain: description: DNSDomain is the dns domain used by k8s services. Defaults to "cluster.local". type: string podSubnet: - description: PodSubnet is the subnet used by pods. - If unset, the API server will not allocate CIDR - ranges for every node. Defaults to a comma-delimited - string of the Cluster object's spec.clusterNetwork.services.cidrBlocks - if that is set + description: |- + PodSubnet is the subnet used by pods. + If unset, the API server will not allocate CIDR ranges for every node. + Defaults to a comma-delimited string of the Cluster object's spec.clusterNetwork.pods.cidrBlocks if that is set type: string serviceSubnet: - description: ServiceSubnet is the subnet used - by k8s services. Defaults to a comma-delimited - string of the Cluster object's spec.clusterNetwork.pods.cidrBlocks, - or to "10.96.0.0/12" if that's unset. + description: |- + ServiceSubnet is the subnet used by k8s services.
+ Defaults to a comma-delimited string of the Cluster object's spec.clusterNetwork.services.cidrBlocks, or + to "10.96.0.0/12" if that's unset. type: string type: object scheduler: @@ -1545,23 +1514,23 @@ spec: extraArgs: additionalProperties: type: string - description: 'ExtraArgs is an extra set of flags - to pass to the control plane component. TODO: - This is temporary and ideally we would like - to switch all components to use ComponentConfig - + ConfigMaps.' + description: |- + ExtraArgs is an extra set of flags to pass to the control plane component. + TODO: This is temporary and ideally we would like to switch all components to + use ComponentConfig + ConfigMaps. type: object extraVolumes: description: ExtraVolumes is an extra set of host volumes, mounted to the control plane component. items: - description: HostPathMount contains elements - describing volumes that are mounted from the + description: |- + HostPathMount contains elements describing volumes that are mounted from the host. properties: hostPath: - description: HostPath is the path in the - host that will be mounted inside the pod. + description: |- + HostPath is the path in the host that will be mounted inside + the pod. type: string mountPath: description: MountPath is the path inside @@ -1618,10 +1587,9 @@ spec: is used. type: string overwrite: - description: Overwrite defines whether or not - to overwrite any existing filesystem. If true, - any pre-existing file system will be destroyed. - Use with Caution. + description: |- + Overwrite defines whether or not to overwrite any existing filesystem. + If true, any pre-existing file system will be destroyed. Use with Caution. type: boolean partition: description: 'Partition specifies the partition @@ -1630,11 +1598,9 @@ is the actual partition number.' type: string replaceFS: - description: 'ReplaceFS is a special directive, - used for Microsoft Azure that instructs cloud-init - to replace a file system of <fs_type>. NOTE: - unless you define a label, this requires the - use of the ''any'' partition directive.' + description: |- + ReplaceFS is a special directive, used for Microsoft Azure that instructs cloud-init to replace a file system of <fs_type>. + NOTE: unless you define a label, this requires the use of the 'any' partition directive. type: string required: - device @@ -1653,23 +1619,21 @@ spec: description: Device is the name of the device. type: string layout: - description: Layout specifies the device layout. - If it is true, a single partition will be - created for the entire device. When layout - is false, it means don't partition or ignore - existing partitioning. + description: |- + Layout specifies the device layout. + If it is true, a single partition will be created for the entire device. + When layout is false, it means don't partition or ignore existing partitioning. type: boolean overwrite: - description: Overwrite describes whether to - skip checks and create the partition if a - partition or filesystem is found on the device. + description: |- + Overwrite describes whether to skip checks and create the partition if a partition or filesystem is found on the device. Use with caution. Default is 'false'. type: boolean tableType: - description: 'TableType specifies the tupe of - partition table. The following are supported: - ''mbr'': default and setups a MS-DOS partition - table ''gpt'': setups a GPT partition table' + description: |- + TableType specifies the type of partition table.
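A hypothetical diskSetup fragment matching the filesystems and partitions schema above; device names and the label are placeholders:

```yaml
diskSetup:
  partitions:
    - device: /dev/sdb
      layout: true          # create a single partition spanning the device
      overwrite: false
      tableType: gpt
  filesystems:
    - device: /dev/sdb1
      filesystem: ext4
      label: data_disk
      overwrite: false
```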
The following are supported: + 'mbr': default and sets up a MS-DOS partition table + 'gpt': sets up a GPT partition table type: string required: - device type: object type: array type: object @@ -1754,11 +1718,12 @@ spec: configuration. properties: additionalConfig: - description: "AdditionalConfig contains additional - configuration to be merged with the Ignition - configuration generated by the bootstrapper - controller. More info: https://coreos.github.io/ignition/operator-notes/#config-merging - \n The data format is documented here: https://kinvolk.io/docs/flatcar-container-linux/latest/provisioning/cl-config/" + description: |- + AdditionalConfig contains additional configuration to be merged with the Ignition + configuration generated by the bootstrapper controller. More info: https://coreos.github.io/ignition/operator-notes/#config-merging + + + The data format is documented here: https://kinvolk.io/docs/flatcar-container-linux/latest/provisioning/cl-config/ type: string strict: description: Strict controls if AdditionalConfig @@ -1772,57 +1737,52 @@ spec: are the configurations necessary for the init command properties: apiVersion: - description: 'APIVersion defines the versioned schema - of this representation of an object. Servers should - convert recognized schemas to the latest internal - value, and may reject unrecognized values. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string bootstrapTokens: - description: BootstrapTokens is respected at `kubeadm - init` time and describes a set of Bootstrap Tokens - to create. This information IS NOT uploaded to the - kubeadm cluster configmap, partly because of its - sensitive nature + description: |- + BootstrapTokens is respected at `kubeadm init` time and describes a set of Bootstrap Tokens to create. + This information IS NOT uploaded to the kubeadm cluster configmap, partly because of its sensitive nature items: description: BootstrapToken describes one bootstrap token, stored as a Secret in the cluster. properties: description: - description: Description sets a human-friendly - message why this token exists and what it's - used for, so other administrators can know - its purpose. + description: |- + Description sets a human-friendly message why this token exists and what it's used + for, so other administrators can know its purpose. type: string expires: - description: Expires specifies the timestamp - when this token expires. Defaults to being - set dynamically at runtime based on the TTL. - Expires and TTL are mutually exclusive. + description: |- + Expires specifies the timestamp when this token expires. Defaults to being set + dynamically at runtime based on the TTL. Expires and TTL are mutually exclusive. format: date-time type: string groups: - description: Groups specifies the extra groups - that this token will authenticate as when/if + description: |- + Groups specifies the extra groups that this token will authenticate as when/if used for authentication items: type: string type: array token: - description: Token is used for establishing - bidirectional trust between nodes and control-planes.
+ description: |- + Token is used for establishing bidirectional trust between nodes and control-planes. Used for joining nodes in the cluster. type: string ttl: - description: TTL defines the time to live for - this token. Defaults to 24h. Expires and TTL - are mutually exclusive. + description: |- + TTL defines the time to live for this token. Defaults to 24h. + Expires and TTL are mutually exclusive. type: string usages: - description: Usages describes the ways in which - this token can be used. Can by default be - used for establishing bidirectional trust, - but that can be changed here. + description: |- + Usages describes the ways in which this token can be used. Can by default be used + for establishing bidirectional trust, but that can be changed here. items: type: string type: array @@ -1831,42 +1791,37 @@ spec: type: object type: array kind: - description: 'Kind is a string value representing - the REST resource this object represents. Servers - may infer this from the endpoint the client submits - requests to. Cannot be updated. In CamelCase. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string localAPIEndpoint: - description: LocalAPIEndpoint represents the endpoint - of the API server instance that's deployed on this - control plane node In HA setups, this differs from - ClusterConfiguration.ControlPlaneEndpoint in the - sense that ControlPlaneEndpoint is the global endpoint - for the cluster, which then loadbalances the requests - to each individual API server. This configuration - object lets you customize what IP/DNS name and port - the local API server advertises it's accessible - on. By default, kubeadm tries to auto-detect the - IP of the default interface and use that, but in - case that process fails you may set the desired - value here. + description: |- + LocalAPIEndpoint represents the endpoint of the API server instance that's deployed on this control plane node + In HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint in the sense that ControlPlaneEndpoint + is the global endpoint for the cluster, which then loadbalances the requests to each individual API server. This + configuration object lets you customize what IP/DNS name and port the local API server advertises it's accessible + on. By default, kubeadm tries to auto-detect the IP of the default interface and use that, but in case that process + fails you may set the desired value here. properties: advertiseAddress: description: AdvertiseAddress sets the IP address for the API server to advertise. type: string bindPort: - description: BindPort sets the secure port for - the API Server to bind to. Defaults to 6443. + description: |- + BindPort sets the secure port for the API Server to bind to. + Defaults to 6443. format: int32 type: integer type: object nodeRegistration: - description: NodeRegistration holds fields that relate - to registering the new control-plane node to the - cluster. When used in the context of control plane - nodes, NodeRegistration should remain consistent + description: |- + NodeRegistration holds fields that relate to registering the new control-plane node to the cluster. 
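For orientation, the bootstrap token and local API endpoint fields above might be set as in this illustrative InitConfiguration fragment; the token is a placeholder of the required [a-z0-9]{6}.[a-z0-9]{16} form:

```yaml
initConfiguration:
  bootstrapTokens:
    - token: "abcdef.0123456789abcdef"   # placeholder value
      description: "token for joining worker nodes"
      ttl: 24h0m0s                       # Expires and TTL are mutually exclusive
      usages: ["signing", "authentication"]
      groups: ["system:bootstrappers:kubeadm:default-node-token"]
  localAPIEndpoint:
    advertiseAddress: "10.0.0.10"
    bindPort: 6443
```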
+ When used in the context of control plane nodes, NodeRegistration should remain consistent across both InitConfiguration and JoinConfiguration properties: criSocket: @@ -1882,13 +1837,12 @@ spec: type: string type: array imagePullPolicy: - description: ImagePullPolicy specifies the policy - for image pulling during kubeadm "init" and - "join" operations. The value of this field must - be one of "Always", "IfNotPresent" or "Never". - Defaults to "IfNotPresent". This can be used - only with Kubernetes version equal to 1.22 and - later. + description: |- + ImagePullPolicy specifies the policy for image pulling + during kubeadm "init" and "join" operations. The value of + this field must be one of "Always", "IfNotPresent" or + "Never". Defaults to "IfNotPresent". This can be used only + with Kubernetes version equal to 1.22 and later. enum: - Always - IfNotPresent @@ -1897,53 +1851,41 @@ spec: kubeletExtraArgs: additionalProperties: type: string - description: KubeletExtraArgs passes through extra - arguments to the kubelet. The arguments here - are passed to the kubelet command line via the - environment file kubeadm writes at runtime for - the kubelet to source. This overrides the generic - base-level configuration in the kubelet-config-1.X - ConfigMap Flags have higher priority when parsing. - These values are local and specific to the node - kubeadm is executing on. + description: |- + KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file + kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap + Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. type: object name: - description: Name is the `.Metadata.Name` field - of the Node API object that will be created - in this `kubeadm init` or `kubeadm join` operation. - This field is also used in the CommonName field - of the kubelet's client certificate to the API - server. Defaults to the hostname of the node - if not provided. + description: |- + Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. + This field is also used in the CommonName field of the kubelet's client certificate to the API server. + Defaults to the hostname of the node if not provided. type: string taints: - description: 'Taints specifies the taints the - Node API object should be registered with. If - this field is unset, i.e. nil, in the `kubeadm - init` process it will be defaulted to []v1.Taint{''node-role.kubernetes.io/master=""''}. - If you don''t want to taint your control-plane - node, set this field to an empty slice, i.e. - `taints: []` in the YAML file. This field is - solely used for Node registration.' + description: |- + Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process + it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your control-plane node, set this field to an + empty slice, i.e. `taints: []` in the YAML file. This field is solely used for Node registration. items: - description: The node this Taint is attached - to has the "effect" on any pod that does not - tolerate the Taint. + description: |- + The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. 
properties: effect: - description: Required. The effect of the - taint on pods that do not tolerate the - taint. Valid effects are NoSchedule, PreferNoSchedule - and NoExecute. + description: |- + Required. The effect of the taint on pods + that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. type: string key: description: Required. The taint key to be applied to a node. type: string timeAdded: - description: TimeAdded represents the time - at which the taint was added. It is only - written for NoExecute taints. + description: |- + TimeAdded represents the time at which the taint was added. + It is only written for NoExecute taints. format: date-time type: string value: @@ -1957,34 +1899,28 @@ spec: type: array type: object patches: - description: Patches contains options related to applying - patches to components deployed by kubeadm during - "kubeadm init". The minimum kubernetes version needed - to support Patches is v1.22 + description: |- + Patches contains options related to applying patches to components deployed by kubeadm during + "kubeadm init". The minimum kubernetes version needed to support Patches is v1.22 properties: directory: - description: Directory is a path to a directory - that contains files named "target[suffix][+patchtype].extension". - For example, "kube-apiserver0+merge.yaml" or - just "etcd.json". "target" can be one of "kube-apiserver", - "kube-controller-manager", "kube-scheduler", - "etcd". "patchtype" can be one of "strategic" - "merge" or "json" and they match the patch formats - supported by kubectl. The default "patchtype" - is "strategic". "extension" must be either "json" - or "yaml". "suffix" is an optional string that - can be used to determine which patches are applied - first alpha-numerically. These files can be - written into the target directory via KubeadmConfig.Files - which specifies additional files to be created - on the machine, either with content inline or + description: |- + Directory is a path to a directory that contains files named "target[suffix][+patchtype].extension". + For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "target" can be one of + "kube-apiserver", "kube-controller-manager", "kube-scheduler", "etcd". "patchtype" can be one + of "strategic" "merge" or "json" and they match the patch formats supported by kubectl. + The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". + "suffix" is an optional string that can be used to determine which patches are applied + first alpha-numerically. + These files can be written into the target directory via KubeadmConfig.Files which + specifies additional files to be created on the machine, either with content inline or by referencing a secret. type: string type: object skipPhases: - description: SkipPhases is a list of phases to skip - during command execution. The list of phases can - be obtained with the "kubeadm init --help" command. + description: |- + SkipPhases is a list of phases to skip during command execution. + The list of phases can be obtained with the "kubeadm init --help" command. This option takes effect only on Kubernetes >=1.22.0. items: type: string @@ -1995,23 +1931,23 @@ spec: for the join command properties: apiVersion: - description: 'APIVersion defines the versioned schema - of this representation of an object. Servers should - convert recognized schemas to the latest internal - value, and may reject unrecognized values. 
More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string caCertPath: - description: 'CACertPath is the path to the SSL certificate - authority used to secure comunications between node - and control-plane. Defaults to "/etc/kubernetes/pki/ca.crt". - TODO: revisit when there is defaulting from k/k' + description: |- + CACertPath is the path to the SSL certificate authority used to + secure communications between node and control-plane. + Defaults to "/etc/kubernetes/pki/ca.crt". + TODO: revisit when there is defaulting from k/k type: string controlPlane: - description: ControlPlane defines the additional control - plane instance to be deployed on the joining node. - If nil, no additional control plane instance will - be deployed. + description: |- + ControlPlane defines the additional control plane instance to be deployed on the joining node. + If nil, no additional control plane instance will be deployed. properties: localAPIEndpoint: description: LocalAPIEndpoint represents the endpoint @@ -2023,21 +1959,21 @@ spec: address for the API server to advertise. type: string bindPort: - description: BindPort sets the secure port - for the API Server to bind to. Defaults - to 6443. + description: |- + BindPort sets the secure port for the API Server to bind to. + Defaults to 6443. format: int32 type: integer type: object type: object discovery: - description: 'Discovery specifies the options for - the kubelet to use during the TLS Bootstrap process - TODO: revisit when there is defaulting from k/k' + description: |- + Discovery specifies the options for the kubelet to use during the TLS Bootstrap process + TODO: revisit when there is defaulting from k/k properties: bootstrapToken: - description: BootstrapToken is used to set the - options for bootstrap token based discovery + description: |- + BootstrapToken is used to set the options for bootstrap token based discovery BootstrapToken and File are mutually exclusive properties: apiServerEndpoint: @@ -2046,43 +1982,36 @@ spec: info will be fetched. type: string caCertHashes: - description: 'CACertHashes specifies a set - of public key pins to verify when token-based - discovery is used. The root CA found during - discovery must match one of these values. - Specifying an empty set disables root CA - pinning, which can be unsafe. Each hash - is specified as "<type>:<value>", where - the only currently supported type is "sha256". - This is a hex-encoded SHA-256 hash of the - Subject Public Key Info (SPKI) object in - DER-encoded ASN.1. These hashes can be calculated - using, for example, OpenSSL: openssl x509 - -pubkey -in ca.crt openssl rsa -pubin -outform - der 2>&/dev/null | openssl dgst -sha256 - -hex' + description: |- + CACertHashes specifies a set of public key pins to verify + when token-based discovery is used. The root CA found during discovery + must match one of these values. Specifying an empty set disables root CA + pinning, which can be unsafe. Each hash is specified as "<type>:<value>", + where the only currently supported type is "sha256". This is a hex-encoded + SHA-256 hash of the Subject Public Key Info (SPKI) object in DER-encoded + ASN.1.
These hashes can be calculated using, for example, OpenSSL: + openssl x509 -pubkey -in ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex items: type: string type: array token: - description: Token is a token used to validate - cluster information fetched from the control-plane. + description: |- + Token is a token used to validate cluster information + fetched from the control-plane. type: string unsafeSkipCAVerification: - description: UnsafeSkipCAVerification allows - token-based discovery without CA verification - via CACertHashes. This can weaken the security - of kubeadm since other nodes can impersonate - the control-plane. + description: |- + UnsafeSkipCAVerification allows token-based discovery + without CA verification via CACertHashes. This can weaken + the security of kubeadm since other nodes can impersonate the control-plane. type: boolean required: - token type: object file: - description: File is used to specify a file or - URL to a kubeconfig file from which to load - cluster information BootstrapToken and File - are mutually exclusive + description: |- + File is used to specify a file or URL to a kubeconfig file from which to load cluster information + BootstrapToken and File are mutually exclusive properties: kubeConfigPath: description: KubeConfigPath is used to specify @@ -2096,26 +2025,24 @@ spec: description: Timeout modifies the discovery timeout type: string tlsBootstrapToken: - description: TLSBootstrapToken is a token used - for TLS bootstrapping. If .BootstrapToken is - set, this field is defaulted to .BootstrapToken.Token, - but can be overridden. If .File is set, this - field **must be set** in case the KubeConfigFile - does not contain any other authentication information + description: |- + TLSBootstrapToken is a token used for TLS bootstrapping. + If .BootstrapToken is set, this field is defaulted to .BootstrapToken.Token, but can be overridden. + If .File is set, this field **must be set** in case the KubeConfigFile does not contain any other authentication information type: string type: object kind: - description: 'Kind is a string value representing - the REST resource this object represents. Servers - may infer this from the endpoint the client submits - requests to. Cannot be updated. In CamelCase. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string nodeRegistration: - description: NodeRegistration holds fields that relate - to registering the new control-plane node to the - cluster. When used in the context of control plane - nodes, NodeRegistration should remain consistent + description: |- + NodeRegistration holds fields that relate to registering the new control-plane node to the cluster. + When used in the context of control plane nodes, NodeRegistration should remain consistent across both InitConfiguration and JoinConfiguration properties: criSocket: @@ -2131,13 +2058,12 @@ spec: type: string type: array imagePullPolicy: - description: ImagePullPolicy specifies the policy - for image pulling during kubeadm "init" and - "join" operations. The value of this field must - be one of "Always", "IfNotPresent" or "Never".
- Defaults to "IfNotPresent". This can be used - only with Kubernetes version equal to 1.22 and - later. + description: |- + ImagePullPolicy specifies the policy for image pulling + during kubeadm "init" and "join" operations. The value of + this field must be one of "Always", "IfNotPresent" or + "Never". Defaults to "IfNotPresent". This can be used only + with Kubernetes version equal to 1.22 and later. enum: - Always - IfNotPresent @@ -2146,53 +2072,41 @@ spec: kubeletExtraArgs: additionalProperties: type: string - description: KubeletExtraArgs passes through extra - arguments to the kubelet. The arguments here - are passed to the kubelet command line via the - environment file kubeadm writes at runtime for - the kubelet to source. This overrides the generic - base-level configuration in the kubelet-config-1.X - ConfigMap Flags have higher priority when parsing. - These values are local and specific to the node - kubeadm is executing on. + description: |- + KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file + kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap + Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. type: object name: - description: Name is the `.Metadata.Name` field - of the Node API object that will be created - in this `kubeadm init` or `kubeadm join` operation. - This field is also used in the CommonName field - of the kubelet's client certificate to the API - server. Defaults to the hostname of the node - if not provided. + description: |- + Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. + This field is also used in the CommonName field of the kubelet's client certificate to the API server. + Defaults to the hostname of the node if not provided. type: string taints: - description: 'Taints specifies the taints the - Node API object should be registered with. If - this field is unset, i.e. nil, in the `kubeadm - init` process it will be defaulted to []v1.Taint{''node-role.kubernetes.io/master=""''}. - If you don''t want to taint your control-plane - node, set this field to an empty slice, i.e. - `taints: []` in the YAML file. This field is - solely used for Node registration.' + description: |- + Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process + it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your control-plane node, set this field to an + empty slice, i.e. `taints: []` in the YAML file. This field is solely used for Node registration. items: - description: The node this Taint is attached - to has the "effect" on any pod that does not - tolerate the Taint. + description: |- + The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. properties: effect: - description: Required. The effect of the - taint on pods that do not tolerate the - taint. Valid effects are NoSchedule, PreferNoSchedule - and NoExecute. + description: |- + Required. The effect of the taint on pods + that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. type: string key: description: Required. The taint key to be applied to a node. 
type: string timeAdded: - description: TimeAdded represents the time - at which the taint was added. It is only - written for NoExecute taints. + description: |- + TimeAdded represents the time at which the taint was added. + It is only written for NoExecute taints. format: date-time type: string value: @@ -2206,34 +2120,28 @@ spec: type: array type: object patches: - description: Patches contains options related to applying - patches to components deployed by kubeadm during - "kubeadm join". The minimum kubernetes version needed - to support Patches is v1.22 + description: |- + Patches contains options related to applying patches to components deployed by kubeadm during + "kubeadm join". The minimum kubernetes version needed to support Patches is v1.22 properties: directory: - description: Directory is a path to a directory - that contains files named "target[suffix][+patchtype].extension". - For example, "kube-apiserver0+merge.yaml" or - just "etcd.json". "target" can be one of "kube-apiserver", - "kube-controller-manager", "kube-scheduler", - "etcd". "patchtype" can be one of "strategic" - "merge" or "json" and they match the patch formats - supported by kubectl. The default "patchtype" - is "strategic". "extension" must be either "json" - or "yaml". "suffix" is an optional string that - can be used to determine which patches are applied - first alpha-numerically. These files can be - written into the target directory via KubeadmConfig.Files - which specifies additional files to be created - on the machine, either with content inline or + description: |- + Directory is a path to a directory that contains files named "target[suffix][+patchtype].extension". + For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "target" can be one of + "kube-apiserver", "kube-controller-manager", "kube-scheduler", "etcd". "patchtype" can be one + of "strategic" "merge" or "json" and they match the patch formats supported by kubectl. + The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". + "suffix" is an optional string that can be used to determine which patches are applied + first alpha-numerically. + These files can be written into the target directory via KubeadmConfig.Files which + specifies additional files to be created on the machine, either with content inline or by referencing a secret. type: string type: object skipPhases: - description: SkipPhases is a list of phases to skip - during command execution. The list of phases can - be obtained with the "kubeadm init --help" command. + description: |- + SkipPhases is a list of phases to skip during command execution. + The list of phases can be obtained with the "kubeadm init --help" command. This option takes effect only on Kubernetes >=1.22.0. items: type: string @@ -2276,18 +2184,24 @@ spec: type: string type: array useExperimentalRetryJoin: - description: "UseExperimentalRetryJoin replaces a basic - kubeadm command with a shell script with retries for - joins. \n This is meant to be an experimental temporary - workaround on some environments where joins fail due - to timing (and other issues). The long term goal is - to add retries to kubeadm proper and use that functionality. - \n This will add about 40KB to userdata \n For more - information, refer to https://github.com/kubernetes-sigs/cluster-api/pull/2763#discussion_r397306055. - \n Deprecated: This experimental fix is no longer needed - and this field will be removed in a future release. 
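
# A hypothetical joinConfiguration excerpt combining the nodeRegistration,
# patches, and skipPhases fields documented above; the patches directory path
# and the kubelet flag are illustrative, not defaults.
joinConfiguration:
  nodeRegistration:
    imagePullPolicy: IfNotPresent
    kubeletExtraArgs:
      eviction-hard: "memory.available<100Mi"
    taints: []   # explicit empty slice: do not taint the control-plane node
  patches:
    directory: /etc/kubeadm/patches   # e.g. kube-apiserver0+merge.yaml lives here
  skipPhases:
  - addon/kube-proxy
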
- When removing also remove from staticcheck exclude-rules - for SA1019 in golangci.yml" + description: |- + UseExperimentalRetryJoin replaces a basic kubeadm command with a shell + script with retries for joins. + + + This is meant to be an experimental temporary workaround on some environments + where joins fail due to timing (and other issues). The long term goal is to add retries to + kubeadm proper and use that functionality. + + + This will add about 40KB to userdata + + + For more information, refer to https://github.com/kubernetes-sigs/cluster-api/pull/2763#discussion_r397306055. + + + Deprecated: This experimental fix is no longer needed and this field will be removed in a future release. + When removing also remove from staticcheck exclude-rules for SA1019 in golangci.yml type: boolean users: description: Users specifies extra users to add @@ -2367,60 +2281,57 @@ spec: type: object type: array verbosity: - description: Verbosity is the number for the kubeadm log - level verbosity. It overrides the `--v` flag in kubeadm - commands. + description: |- + Verbosity is the number for the kubeadm log level verbosity. + It overrides the `--v` flag in kubeadm commands. format: int32 type: integer type: object machineTemplate: - description: MachineTemplate contains information about how - machines should be shaped when creating or updating a control - plane. + description: |- + MachineTemplate contains information about how machines + should be shaped when creating or updating a control plane. properties: metadata: - description: 'Standard object''s metadata. More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key value - map stored with a resource that may be set by external - tools to store and retrieve arbitrary metadata. - They are not queryable and should be preserved when - modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations type: object labels: additionalProperties: type: string - description: 'Map of string keys and values that can - be used to organize and categorize (scope and select) - objects. May match selectors of replication controllers - and services. More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object type: object nodeDeletionTimeout: - description: NodeDeletionTimeout defines how long the - machine controller will attempt to delete the Node that - the Machine hosts after the Machine is marked for deletion. - A duration of 0 will retry deletion indefinitely. If - no value is provided, the default value for this property - of the Machine resource will be used. 
+ description: |- + NodeDeletionTimeout defines how long the machine controller will attempt to delete the Node that the Machine + hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. + If no value is provided, the default value for this property of the Machine resource will be used. type: string nodeDrainTimeout: - description: 'NodeDrainTimeout is the total amount of - time that the controller will spend on draining a controlplane - node The default value is 0, meaning that the node can - be drained without any time limitations. NOTE: NodeDrainTimeout - is different from `kubectl drain --timeout`' + description: |- + NodeDrainTimeout is the total amount of time that the controller will spend on draining a controlplane node + The default value is 0, meaning that the node can be drained without any time limitations. + NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` type: string nodeVolumeDetachTimeout: - description: NodeVolumeDetachTimeout is the total amount - of time that the controller will spend on waiting for - all volumes to be detached. The default value is 0, - meaning that the volumes can be detached without any - time limitations. + description: |- + NodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. type: string type: object remediationStrategy: @@ -2429,63 +2340,68 @@ spec: properties: maxRetry: description: "MaxRetry is the Max number of retries while - attempting to remediate an unhealthy machine. A retry + attempting to remediate an unhealthy machine.\nA retry happens when a machine that was created as a replacement - for an unhealthy machine also fails. For example, given - a control plane with three machines M1, M2, M3: \n M1 + for an unhealthy machine also fails.\nFor example, given + a control plane with three machines M1, M2, M3:\n\n\n\tM1 become unhealthy; remediation happens, and M1-1 is created - as a replacement. If M1-1 (replacement of M1) has problems - while bootstrapping it will become unhealthy, and then - be remediated; such operation is considered a retry, - remediation-retry #1. If M1-2 (replacement of M1-2) - becomes unhealthy, remediation-retry #2 will happen, - etc. \n A retry could happen only after RetryPeriod - from the previous retry. If a machine is marked as unhealthy - after MinHealthyPeriod from the previous remediation - expired, this is not considered a retry anymore because + as a replacement.\n\tIf M1-1 (replacement of M1) has + problems while bootstrapping it will become unhealthy, + and then be\n\tremediated; such operation is considered + a retry, remediation-retry #1.\n\tIf M1-2 (replacement + of M1-1) becomes unhealthy, remediation-retry #2 will + happen, etc.\n\n\nA retry could happen only after RetryPeriod + from the previous retry.\nIf a machine is marked as + unhealthy after MinHealthyPeriod from the previous remediation + expired,\nthis is not considered a retry anymore because the new issue is assumed unrelated from the previous - one. \n If not set, the remedation will be retried infinitely." + one.\n\n\nIf not set, the remedation will be retried + infinitely." format: int32 type: integer minHealthyPeriod: description: "MinHealthyPeriod defines the duration after - which KCP will consider any failure to a machine unrelated - from the previous one. 
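
# A sketch of the machineTemplate timeouts described above; durations are
# example values (for drain and volume detach, 0 means no time limit).
machineTemplate:
  nodeDrainTimeout: 10m
  nodeVolumeDetachTimeout: 5m
  nodeDeletionTimeout: 2m
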
In this case the remediation - is not considered a retry anymore, and thus the retry - counter restarts from 0. For example, assuming MinHealthyPeriod - is set to 1h (default) \n M1 become unhealthy; remediation - happens, and M1-1 is created as a replacement. If M1-1 - (replacement of M1) has problems within the 1hr after - the creation, also this machine will be remediated and - this operation is considered a retry - a problem related - to the original issue happened to M1 -. \n If instead - the problem on M1-1 is happening after MinHealthyPeriod - expired, e.g. four days after m1-1 has been created + which KCP will consider any failure to a machine unrelated\nfrom + the previous one. In this case the remediation is not + considered a retry anymore, and thus the retry\ncounter + restarts from 0. For example, assuming MinHealthyPeriod + is set to 1h (default)\n\n\n\tM1 become unhealthy; remediation + happens, and M1-1 is created as a replacement.\n\tIf + M1-1 (replacement of M1) has problems within the 1hr + after the creation, also\n\tthis machine will be remediated + and this operation is considered a retry - a problem + related\n\tto the original issue happened to M1 -.\n\n\n\tIf + instead the problem on M1-1 is happening after MinHealthyPeriod + expired, e.g. four days after\n\tm1-1 has been created as a remediation of M1, the problem on M1-1 is considered - unrelated to the original issue happened to M1. \n If + unrelated to\n\tthe original issue happened to M1.\n\n\nIf not set, this value is defaulted to 1h." type: string retryPeriod: - description: "RetryPeriod is the duration that KCP should - wait before remediating a machine being created as a - replacement for an unhealthy machine (a retry). \n If - not set, a retry will happen immediately." + description: |- + RetryPeriod is the duration that KCP should wait before remediating a machine being created as a replacement + for an unhealthy machine (a retry). + + + If not set, a retry will happen immediately. type: string type: object rolloutAfter: - description: RolloutAfter is a field to indicate a rollout - should be performed after the specified time even if no - changes have been made to the KubeadmControlPlane. + description: |- + RolloutAfter is a field to indicate a rollout should be performed + after the specified time even if no changes have been made to the + KubeadmControlPlane. format: date-time type: string rolloutBefore: - description: RolloutBefore is a field to indicate a rollout - should be performed if the specified criteria is met. + description: |- + RolloutBefore is a field to indicate a rollout should be performed + if the specified criteria is met. properties: certificatesExpiryDays: - description: CertificatesExpiryDays indicates a rollout - needs to be performed if the certificates of the machine - will expire within the specified days. + description: |- + CertificatesExpiryDays indicates a rollout needs to be performed if the + certificates of the machine will expire within the specified days. format: int32 type: integer type: object @@ -2494,28 +2410,33 @@ spec: rollingUpdate: maxSurge: 1 type: RollingUpdate - description: The RolloutStrategy to use to replace control - plane machines with new ones. + description: |- + The RolloutStrategy to use to replace control plane machines with + new ones. properties: rollingUpdate: - description: Rolling update config params. Present only - if RolloutStrategyType = RollingUpdate. + description: |- + Rolling update config params. 
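
# The remediation knobs above as they would appear in a KubeadmControlPlane
# spec; values are examples (only minHealthyPeriod: 1h matches the documented
# default).
remediationStrategy:
  maxRetry: 3           # stop after three failed replacement machines
  retryPeriod: 10m      # wait between consecutive remediation retries
  minHealthyPeriod: 1h  # later failures count as new, unrelated problems
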
Present only if + RolloutStrategyType = RollingUpdate. properties: maxSurge: anyOf: - type: integer - type: string - description: 'The maximum number of control planes - that can be scheduled above or under the desired - number of control planes. Value can be an absolute - number 1 or 0. Defaults to 1. Example: when this - is set to 1, the control plane can be scaled up - immediately when the rolling update starts.' + description: |- + The maximum number of control planes that can be scheduled above or under the + desired number of control planes. + Value can be an absolute number 1 or 0. + Defaults to 1. + Example: when this is set to 1, the control plane can be scaled + up immediately when the rolling update starts. x-kubernetes-int-or-string: true type: object type: - description: Type of rollout. Currently the only supported - strategy is "RollingUpdate". Default is RollingUpdate. + description: |- + Type of rollout. Currently the only supported strategy is + "RollingUpdate". + Default is RollingUpdate. type: string type: object required: diff --git a/controlplane/kubeadm/config/crd/kustomization.yaml b/controlplane/kubeadm/config/crd/kustomization.yaml index 9f62fc8d9a09..266cb6e266cb 100644 --- a/controlplane/kubeadm/config/crd/kustomization.yaml +++ b/controlplane/kubeadm/config/crd/kustomization.yaml @@ -1,29 +1,29 @@ -commonLabels: - cluster.x-k8s.io/v1alpha3: v1alpha3 - cluster.x-k8s.io/v1alpha4: v1alpha4 - cluster.x-k8s.io/v1beta1: v1beta1 +labels: +- includeSelectors: true + pairs: + cluster.x-k8s.io/v1beta1: v1beta1 # This kustomization.yaml is not intended to be run by itself, # since it depends on service name and namespace that are out of this kustomize package. # It should be run by config/ resources: - - bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml - - bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanetemplates.yaml +- bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml +- bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanetemplates.yaml # +kubebuilder:scaffold:crdkustomizeresource -patchesStrategicMerge: +patches: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. # patches here are for enabling the conversion webhook for each CRD - - patches/webhook_in_kubeadmcontrolplanes.yaml - - patches/webhook_in_kubeadmcontrolplanetemplates.yaml +- path: patches/webhook_in_kubeadmcontrolplanes.yaml +- path: patches/webhook_in_kubeadmcontrolplanetemplates.yaml # +kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. # patches here are for enabling the CA injection for each CRD - - patches/cainjection_in_kubeadmcontrolplanes.yaml - - patches/cainjection_in_kubeadmcontrolplanetemplates.yaml +- path: patches/cainjection_in_kubeadmcontrolplanes.yaml +- path: patches/cainjection_in_kubeadmcontrolplanetemplates.yaml # +kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. 
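
# rolloutBefore and rolloutStrategy combined in one hypothetical spec excerpt,
# per the descriptions above:
rolloutBefore:
  certificatesExpiryDays: 21   # roll machines whose certificates expire within 21 days
rolloutStrategy:
  type: RollingUpdate
  rollingUpdate:
    maxSurge: 1   # e.g. with replicas: 3, scale to 4 while rolling
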
configurations: - - kustomizeconfig.yaml +- kustomizeconfig.yaml diff --git a/controlplane/kubeadm/config/crd/kustomizeconfig.yaml b/controlplane/kubeadm/config/crd/kustomizeconfig.yaml index e3fd575d604b..d10c3471df21 100644 --- a/controlplane/kubeadm/config/crd/kustomizeconfig.yaml +++ b/controlplane/kubeadm/config/crd/kustomizeconfig.yaml @@ -13,5 +13,3 @@ namespace: path: spec/conversion/webhook/clientConfig/service/namespace create: false -varReference: - - path: metadata/annotations diff --git a/controlplane/kubeadm/config/crd/patches/cainjection_in_kubeadmcontrolplanes.yaml b/controlplane/kubeadm/config/crd/patches/cainjection_in_kubeadmcontrolplanes.yaml index 08aec1dbb700..8592ccf0e104 100644 --- a/controlplane/kubeadm/config/crd/patches/cainjection_in_kubeadmcontrolplanes.yaml +++ b/controlplane/kubeadm/config/crd/patches/cainjection_in_kubeadmcontrolplanes.yaml @@ -4,5 +4,5 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME name: kubeadmcontrolplanes.controlplane.cluster.x-k8s.io diff --git a/controlplane/kubeadm/config/crd/patches/cainjection_in_kubeadmcontrolplanetemplates.yaml b/controlplane/kubeadm/config/crd/patches/cainjection_in_kubeadmcontrolplanetemplates.yaml index 654e514cb2ca..25c104a96810 100644 --- a/controlplane/kubeadm/config/crd/patches/cainjection_in_kubeadmcontrolplanetemplates.yaml +++ b/controlplane/kubeadm/config/crd/patches/cainjection_in_kubeadmcontrolplanetemplates.yaml @@ -4,5 +4,5 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME name: kubeadmcontrolplanetemplates.controlplane.cluster.x-k8s.io diff --git a/controlplane/kubeadm/config/default/kustomization.yaml b/controlplane/kubeadm/config/default/kustomization.yaml index 3178d82fe330..6e7db03dbefa 100644 --- a/controlplane/kubeadm/config/default/kustomization.yaml +++ b/controlplane/kubeadm/config/default/kustomization.yaml @@ -2,57 +2,120 @@ namespace: capi-kubeadm-control-plane-system namePrefix: capi-kubeadm-control-plane- -commonLabels: - cluster.x-k8s.io/provider: "control-plane-kubeadm" +labels: +- includeSelectors: true + pairs: + cluster.x-k8s.io/provider: control-plane-kubeadm resources: - namespace.yaml +- ../crd +- ../rbac +- ../manager +- ../webhook +- ../certmanager -bases: - - ../crd - - ../rbac - - ../manager - - ../webhook - - ../certmanager - -patchesStrategicMerge: +patches: # Provide customizable hook for make targets. - - manager_image_patch.yaml - - manager_pull_policy.yaml +- path: manager_image_patch.yaml +- path: manager_pull_policy.yaml # Enable webhook. - - manager_webhook_patch.yaml +- path: manager_webhook_patch.yaml # Inject certificate in the webhook definition. 
- - webhookcainjection_patch.yaml +- path: webhookcainjection_patch.yaml # Enable aggregated ClusterRole aggregation - - manager_role_aggregation_patch.yaml +- path: manager_role_aggregation_patch.yaml -vars: - - name: CERTIFICATE_NAMESPACE # namespace of the certificate CR - objref: - kind: Certificate +replacements: +- source: + fieldPath: .metadata.namespace + group: cert-manager.io + kind: Certificate + name: serving-cert + version: v1 + targets: + - fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + create: true + delimiter: / + select: + kind: ValidatingWebhookConfiguration + - fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + create: true + delimiter: / + select: + kind: MutatingWebhookConfiguration + - fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + create: true + delimiter: / + select: + kind: CustomResourceDefinition +- source: + fieldPath: .metadata.name + group: cert-manager.io + kind: Certificate + name: serving-cert + version: v1 + targets: + - fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + create: true + delimiter: / + index: 1 + select: + kind: ValidatingWebhookConfiguration + - fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + create: true + delimiter: / + index: 1 + select: + kind: MutatingWebhookConfiguration + - fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + create: true + delimiter: / + index: 1 + select: + kind: CustomResourceDefinition +- source: + fieldPath: .metadata.name + kind: Service + name: webhook-service + version: v1 + targets: + - fieldPaths: + - .spec.dnsNames.0 + - .spec.dnsNames.1 + options: + create: true + delimiter: . + select: group: cert-manager.io - version: v1 - name: serving-cert # this name should match the one in certificate.yaml - fieldref: - fieldpath: metadata.namespace - - name: CERTIFICATE_NAME - objref: kind: Certificate - group: cert-manager.io version: v1 - name: serving-cert # this name should match the one in certificate.yaml - - name: SERVICE_NAMESPACE # namespace of the service - objref: - kind: Service - version: v1 - name: webhook-service - fieldref: - fieldpath: metadata.namespace - - name: SERVICE_NAME - objref: - kind: Service +- source: + fieldPath: .metadata.namespace + kind: Service + name: webhook-service + version: v1 + targets: + - fieldPaths: + - .spec.dnsNames.0 + - .spec.dnsNames.1 + options: + create: true + delimiter: . 
+ index: 1 + select: + group: cert-manager.io + kind: Certificate version: v1 - name: webhook-service - -configurations: - - kustomizeconfig.yaml diff --git a/controlplane/kubeadm/config/default/kustomizeconfig.yaml b/controlplane/kubeadm/config/default/kustomizeconfig.yaml deleted file mode 100644 index eb191e64d056..000000000000 --- a/controlplane/kubeadm/config/default/kustomizeconfig.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# This configuration is for teaching kustomize how to update name ref and var substitution -varReference: -- kind: Deployment - path: spec/template/spec/volumes/secret/secretName diff --git a/controlplane/kubeadm/config/default/manager_webhook_patch.yaml b/controlplane/kubeadm/config/default/manager_webhook_patch.yaml index bccef6d70db8..348e516e20dd 100644 --- a/controlplane/kubeadm/config/default/manager_webhook_patch.yaml +++ b/controlplane/kubeadm/config/default/manager_webhook_patch.yaml @@ -19,4 +19,4 @@ spec: volumes: - name: cert secret: - secretName: $(SERVICE_NAME)-cert + secretName: capi-kubeadm-control-plane-webhook-service-cert diff --git a/controlplane/kubeadm/config/default/webhookcainjection_patch.yaml b/controlplane/kubeadm/config/default/webhookcainjection_patch.yaml index 02ab515d4281..ead37fab5177 100644 --- a/controlplane/kubeadm/config/default/webhookcainjection_patch.yaml +++ b/controlplane/kubeadm/config/default/webhookcainjection_patch.yaml @@ -1,15 +1,15 @@ # This patch add annotation to admission webhook config and -# the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize. +# the variables CERTIFICATE_NAMESPACE and CERTIFICATE_NAME will be substituted by kustomize. apiVersion: admissionregistration.k8s.io/v1 kind: MutatingWebhookConfiguration metadata: name: mutating-webhook-configuration annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME --- apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration metadata: name: validating-webhook-configuration annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME diff --git a/controlplane/kubeadm/config/manager/manager.yaml b/controlplane/kubeadm/config/manager/manager.yaml index 34e85f86e86d..8947d31b045c 100644 --- a/controlplane/kubeadm/config/manager/manager.yaml +++ b/controlplane/kubeadm/config/manager/manager.yaml @@ -20,8 +20,10 @@ spec: - /manager args: - "--leader-elect" - - "--metrics-bind-addr=localhost:8080" - - "--feature-gates=ClusterTopology=${CLUSTER_TOPOLOGY:=false},KubeadmBootstrapFormatIgnition=${EXP_KUBEADM_BOOTSTRAP_FORMAT_IGNITION:=false}" + - "--diagnostics-address=${CAPI_DIAGNOSTICS_ADDRESS:=:8443}" + - "--insecure-diagnostics=${CAPI_INSECURE_DIAGNOSTICS:=false}" + - "--use-deprecated-infra-machine-naming=${CAPI_USE_DEPRECATED_INFRA_MACHINE_NAMING:=false}" + - "--feature-gates=MachinePool=${EXP_MACHINE_POOL:=true},ClusterTopology=${CLUSTER_TOPOLOGY:=false},KubeadmBootstrapFormatIgnition=${EXP_KUBEADM_BOOTSTRAP_FORMAT_IGNITION:=false}" image: controller:latest name: manager env: @@ -41,6 +43,9 @@ spec: - containerPort: 9440 name: healthz protocol: TCP + - containerPort: 8443 + name: metrics + protocol: TCP readinessProbe: httpGet: path: /readyz @@ -57,6 +62,7 @@ spec: privileged: false runAsUser: 65532 runAsGroup: 65532 + terminationMessagePolicy: FallbackToLogsOnError terminationGracePeriodSeconds: 
10 serviceAccountName: manager tolerations: diff --git a/controlplane/kubeadm/config/rbac/role.yaml b/controlplane/kubeadm/config/rbac/role.yaml index edf8324f3d80..138712d74d1f 100644 --- a/controlplane/kubeadm/config/rbac/role.yaml +++ b/controlplane/kubeadm/config/rbac/role.yaml @@ -2,7 +2,6 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - creationTimestamp: null name: manager-role rules: - apiGroups: @@ -13,6 +12,18 @@ rules: - get - list - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create - apiGroups: - bootstrap.cluster.x-k8s.io - controlplane.cluster.x-k8s.io @@ -36,6 +47,14 @@ rules: - get - list - watch +- apiGroups: + - cluster.x-k8s.io + resources: + - machinepools + verbs: + - get + - list + - watch - apiGroups: - cluster.x-k8s.io resources: @@ -55,10 +74,6 @@ rules: - events verbs: - create - - get - - list - - patch - - watch - apiGroups: - "" resources: diff --git a/controlplane/kubeadm/config/webhook/kustomizeconfig.yaml b/controlplane/kubeadm/config/webhook/kustomizeconfig.yaml index 25e21e3c963f..6d782eb8e350 100644 --- a/controlplane/kubeadm/config/webhook/kustomizeconfig.yaml +++ b/controlplane/kubeadm/config/webhook/kustomizeconfig.yaml @@ -21,5 +21,3 @@ namespace: path: webhooks/clientConfig/service/namespace create: true -varReference: -- path: metadata/annotations diff --git a/controlplane/kubeadm/config/webhook/manifests.yaml b/controlplane/kubeadm/config/webhook/manifests.yaml index 607a3aebc9c1..516b8aa3b635 100644 --- a/controlplane/kubeadm/config/webhook/manifests.yaml +++ b/controlplane/kubeadm/config/webhook/manifests.yaml @@ -2,7 +2,6 @@ apiVersion: admissionregistration.k8s.io/v1 kind: MutatingWebhookConfiguration metadata: - creationTimestamp: null name: mutating-webhook-configuration webhooks: - admissionReviewVersions: @@ -52,7 +51,6 @@ webhooks: apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration metadata: - creationTimestamp: null name: validating-webhook-configuration webhooks: - admissionReviewVersions: @@ -62,20 +60,19 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-controlplane-cluster-x-k8s-io-v1beta1-kubeadmcontrolplane + path: /validate-scale-controlplane-cluster-x-k8s-io-v1beta1-kubeadmcontrolplane failurePolicy: Fail matchPolicy: Equivalent - name: validation.kubeadmcontrolplane.controlplane.cluster.x-k8s.io + name: validation-scale.kubeadmcontrolplane.controlplane.cluster.x-k8s.io rules: - apiGroups: - controlplane.cluster.x-k8s.io apiVersions: - v1beta1 operations: - - CREATE - UPDATE resources: - - kubeadmcontrolplanes + - kubeadmcontrolplanes/scale sideEffects: None - admissionReviewVersions: - v1 @@ -84,9 +81,10 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-controlplane-cluster-x-k8s-io-v1beta1-kubeadmcontrolplanetemplate + path: /validate-controlplane-cluster-x-k8s-io-v1beta1-kubeadmcontrolplane failurePolicy: Fail - name: validation.kubeadmcontrolplanetemplate.controlplane.cluster.x-k8s.io + matchPolicy: Equivalent + name: validation.kubeadmcontrolplane.controlplane.cluster.x-k8s.io rules: - apiGroups: - controlplane.cluster.x-k8s.io @@ -96,7 +94,7 @@ webhooks: - CREATE - UPDATE resources: - - kubeadmcontrolplanetemplates + - kubeadmcontrolplanes sideEffects: None - admissionReviewVersions: - v1 @@ -105,17 +103,17 @@ webhooks: service: name: webhook-service namespace: system - 
path: /validate-scale-controlplane-cluster-x-k8s-io-v1beta1-kubeadmcontrolplane + path: /validate-controlplane-cluster-x-k8s-io-v1beta1-kubeadmcontrolplanetemplate failurePolicy: Fail - matchPolicy: Equivalent - name: validation-scale.kubeadmcontrolplane.controlplane.cluster.x-k8s.io + name: validation.kubeadmcontrolplanetemplate.controlplane.cluster.x-k8s.io rules: - apiGroups: - controlplane.cluster.x-k8s.io apiVersions: - v1beta1 operations: + - CREATE - UPDATE resources: - - kubeadmcontrolplanes/scale + - kubeadmcontrolplanetemplates sideEffects: None diff --git a/controlplane/kubeadm/controllers/alias.go b/controlplane/kubeadm/controllers/alias.go index f5584c0dd86e..270f5222ba61 100644 --- a/controlplane/kubeadm/controllers/alias.go +++ b/controlplane/kubeadm/controllers/alias.go @@ -30,25 +30,29 @@ import ( // KubeadmControlPlaneReconciler reconciles a KubeadmControlPlane object. type KubeadmControlPlaneReconciler struct { - Client client.Client - APIReader client.Reader - Tracker *remote.ClusterCacheTracker + Client client.Client + SecretCachingClient client.Client + Tracker *remote.ClusterCacheTracker EtcdDialTimeout time.Duration EtcdCallTimeout time.Duration // WatchFilterValue is the label value used to filter events prior to reconciliation. WatchFilterValue string + + // Deprecated: DeprecatedInfraMachineNaming. Name the InfraStructureMachines after the InfraMachineTemplate. + DeprecatedInfraMachineNaming bool } // SetupWithManager sets up the reconciler with the Manager. func (r *KubeadmControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { return (&kubeadmcontrolplanecontrollers.KubeadmControlPlaneReconciler{ - Client: r.Client, - APIReader: r.APIReader, - Tracker: r.Tracker, - EtcdDialTimeout: r.EtcdDialTimeout, - EtcdCallTimeout: r.EtcdCallTimeout, - WatchFilterValue: r.WatchFilterValue, + Client: r.Client, + SecretCachingClient: r.SecretCachingClient, + Tracker: r.Tracker, + EtcdDialTimeout: r.EtcdDialTimeout, + EtcdCallTimeout: r.EtcdCallTimeout, + WatchFilterValue: r.WatchFilterValue, + DeprecatedInfraMachineNaming: r.DeprecatedInfraMachineNaming, }).SetupWithManager(ctx, mgr, options) } diff --git a/controlplane/kubeadm/internal/cluster.go b/controlplane/kubeadm/internal/cluster.go index 47c4e419b946..67a6d3aa3882 100644 --- a/controlplane/kubeadm/internal/cluster.go +++ b/controlplane/kubeadm/internal/cluster.go @@ -25,6 +25,8 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -50,10 +52,11 @@ type ManagementCluster interface { // Management holds operations on the management cluster. type Management struct { - Client client.Reader - Tracker *remote.ClusterCacheTracker - EtcdDialTimeout time.Duration - EtcdCallTimeout time.Duration + Client client.Reader + SecretCachingClient client.Reader + Tracker *remote.ClusterCacheTracker + EtcdDialTimeout time.Duration + EtcdCallTimeout time.Duration } // RemoteClusterConnectionError represents a failure to connect to a remote cluster. @@ -102,32 +105,18 @@ func (m *Management) GetMachinePoolsForCluster(ctx context.Context, cluster *clu func (m *Management) GetWorkloadCluster(ctx context.Context, clusterKey client.ObjectKey) (WorkloadCluster, error) { // TODO(chuckha): Inject this dependency. // TODO(chuckha): memoize this function. 
The workload client only exists as long as a reconciliation loop. - restConfig, err := remote.RESTConfig(ctx, KubeadmControlPlaneControllerName, m.Client, clusterKey) + restConfig, err := m.Tracker.GetRESTConfig(ctx, clusterKey) if err != nil { - return nil, err + return nil, &RemoteClusterConnectionError{Name: clusterKey.String(), Err: err} } + restConfig = rest.CopyConfig(restConfig) restConfig.Timeout = 30 * time.Second - if m.Tracker == nil { - return nil, errors.New("Cannot get WorkloadCluster: No remote Cluster Cache") - } - c, err := m.Tracker.GetClient(ctx, clusterKey) if err != nil { - return nil, err - } - - clientConfig, err := m.Tracker.GetRESTConfig(ctx, clusterKey) - if err != nil { - return nil, err + return nil, &RemoteClusterConnectionError{Name: clusterKey.String(), Err: err} } - // Make sure we use the same CA and Host as the client. - // Note: This has to be done to be able to communicate directly on self-hosted clusters. - restConfig.CAData = clientConfig.CAData - restConfig.CAFile = clientConfig.CAFile - restConfig.Host = clientConfig.Host - // Retrieves the etcd CA key Pair crtData, keyData, err := m.getEtcdCAKeyPair(ctx, clusterKey) if err != nil { @@ -141,7 +130,12 @@ func (m *Management) GetWorkloadCluster(ctx context.Context, clusterKey client.O // TODO: consider if we can detect if we are using external etcd in a more explicit way (e.g. looking at the config instead of deriving from the existing certificates) var clientCert tls.Certificate if keyData != nil { - clientCert, err = generateClientCert(crtData, keyData) + clientKey, err := m.Tracker.GetEtcdClientCertificateKey(ctx, clusterKey) + if err != nil { + return nil, err + } + + clientCert, err = generateClientCert(crtData, keyData, clientKey) if err != nil { return nil, err } @@ -174,9 +168,21 @@ func (m *Management) getEtcdCAKeyPair(ctx context.Context, clusterKey client.Obj Namespace: clusterKey.Namespace, Name: fmt.Sprintf("%s-etcd", clusterKey.Name), } - if err := m.Client.Get(ctx, etcdCAObjectKey, etcdCASecret); err != nil { - return nil, nil, errors.Wrapf(err, "failed to get secret; etcd CA bundle %s/%s", etcdCAObjectKey.Namespace, etcdCAObjectKey.Name) + + // Try to get the certificate via the cached client. + err := m.SecretCachingClient.Get(ctx, etcdCAObjectKey, etcdCASecret) + if err != nil { + if !apierrors.IsNotFound(err) { + // Return error if we got an errors which is not a NotFound error. + return nil, nil, errors.Wrapf(err, "failed to get secret; etcd CA bundle %s/%s", etcdCAObjectKey.Namespace, etcdCAObjectKey.Name) + } + + // Try to get the certificate via the uncached client. 
+ if err := m.Client.Get(ctx, etcdCAObjectKey, etcdCASecret); err != nil { + return nil, nil, errors.Wrapf(err, "failed to get secret; etcd CA bundle %s/%s", etcdCAObjectKey.Namespace, etcdCAObjectKey.Name) + } } + crtData, ok := etcdCASecret.Data[secret.TLSCrtDataName] if !ok { return nil, nil, errors.Errorf("etcd tls crt does not exist for cluster %s/%s", clusterKey.Namespace, clusterKey.Name) diff --git a/controlplane/kubeadm/internal/cluster_labels.go b/controlplane/kubeadm/internal/cluster_labels.go index 6cd10d4fe8f3..44468b6aa067 100644 --- a/controlplane/kubeadm/internal/cluster_labels.go +++ b/controlplane/kubeadm/internal/cluster_labels.go @@ -19,7 +19,7 @@ package internal import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" - capilabels "sigs.k8s.io/cluster-api/internal/labels" + "sigs.k8s.io/cluster-api/util/labels/format" ) // ControlPlaneMachineLabelsForCluster returns a set of labels to add to a control plane machine for this specific cluster. @@ -36,6 +36,6 @@ func ControlPlaneMachineLabelsForCluster(kcp *controlplanev1.KubeadmControlPlane labels[clusterv1.ClusterNameLabel] = clusterName labels[clusterv1.MachineControlPlaneLabel] = "" // Note: MustFormatValue is used here as the label value can be a hash if the control plane name is longer than 63 characters. - labels[clusterv1.MachineControlPlaneNameLabel] = capilabels.MustFormatValue(kcp.Name) + labels[clusterv1.MachineControlPlaneNameLabel] = format.MustFormatValue(kcp.Name) return labels } diff --git a/controlplane/kubeadm/internal/cluster_test.go b/controlplane/kubeadm/internal/cluster_test.go index f1944712c76d..233c67327e3d 100644 --- a/controlplane/kubeadm/internal/cluster_test.go +++ b/controlplane/kubeadm/internal/cluster_test.go @@ -58,12 +58,12 @@ func TestGetMachinesForCluster(t *testing.T) { }, } machines, err := m.GetMachinesForCluster(ctx, cluster) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(machines).To(HaveLen(3)) // Test the ControlPlaneMachines works machines, err = m.GetMachinesForCluster(ctx, cluster, collections.ControlPlaneMachines("my-cluster")) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(machines).To(HaveLen(1)) // Test that the filters use AND logic instead of OR logic @@ -71,7 +71,7 @@ func TestGetMachinesForCluster(t *testing.T) { return cluster.Name == "first-machine" } machines, err = m.GetMachinesForCluster(ctx, cluster, collections.ControlPlaneMachines("my-cluster"), nameFilter) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(machines).To(HaveLen(1)) } @@ -93,6 +93,9 @@ func TestGetWorkloadCluster(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster-etcd", Namespace: ns.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: "my-cluster", + }, }, Data: map[string][]byte{ secret.TLSCrtDataName: certs.EncodeCertPEM(cert), @@ -105,14 +108,6 @@ func TestGetWorkloadCluster(t *testing.T) { delete(emptyKeyEtcdSecret.Data, secret.TLSKeyDataName) badCrtEtcdSecret := etcdSecret.DeepCopy() badCrtEtcdSecret.Data[secret.TLSCrtDataName] = []byte("bad cert") - tracker, err := remote.NewClusterCacheTracker( - env.Manager, - remote.ClusterCacheTrackerOptions{ - Log: &log.Log, - Indexes: remote.DefaultIndexes, - }, - ) - g.Expect(err).ToNot(HaveOccurred()) // Create kubeconfig secret // Store the envtest config as the contents of the kubeconfig secret. 
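
// A standalone sketch of the cached-first, live-fallback read that
// getEtcdCAKeyPair uses above. The assumption (implied, not stated, by the
// hunk) is that the caching client only sees Secrets matching a selector, so
// a NotFound from it may simply mean "filtered out of the cache" and is
// retried against the regular client before failing.
package sketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func getSecretPreferCache(ctx context.Context, cached, live client.Reader, key client.ObjectKey) (*corev1.Secret, error) {
	s := &corev1.Secret{}
	err := cached.Get(ctx, key, s)
	if err == nil {
		return s, nil
	}
	if !apierrors.IsNotFound(err) {
		// A real error (not a cache miss): surface it.
		return nil, err
	}
	// The Secret may exist but be invisible to the cache; try a live read.
	if err := live.Get(ctx, key, s); err != nil {
		return nil, err
	}
	return s, nil
}
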
@@ -128,6 +123,9 @@ func TestGetWorkloadCluster(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster-kubeconfig", Namespace: ns.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: "my-cluster", + }, }, Data: map[string][]byte{ secret.KubeconfigDataName: testEnvKubeconfig, @@ -187,17 +185,26 @@ func TestGetWorkloadCluster(t *testing.T) { g := NewWithT(t) for _, o := range tt.objs { - g.Expect(env.Client.Create(ctx, o)).To(Succeed()) + g.Expect(env.CreateAndWait(ctx, o)).To(Succeed()) defer func(do client.Object) { - g.Expect(env.Cleanup(ctx, do)).To(Succeed()) + g.Expect(env.CleanupAndWait(ctx, do)).To(Succeed()) }(o) } - // Note: The API reader is intentionally used instead of the regular (cached) client - // to avoid test failures when the local cache isn't able to catch up in time. + // We have to create a new ClusterCacheTracker for every test case otherwise + // it could still have a rest config from a previous run cached. + tracker, err := remote.NewClusterCacheTracker( + env.Manager, + remote.ClusterCacheTrackerOptions{ + Log: &log.Log, + }, + ) + g.Expect(err).ToNot(HaveOccurred()) + m := Management{ - Client: env.GetAPIReader(), - Tracker: tracker, + Client: env.GetClient(), + SecretCachingClient: secretCachingClient, + Tracker: tracker, } workloadCluster, err := m.GetWorkloadCluster(ctx, tt.clusterKey) diff --git a/controlplane/kubeadm/internal/control_plane.go b/controlplane/kubeadm/internal/control_plane.go index b81f01d4e2a4..bfed636b8326 100644 --- a/controlplane/kubeadm/internal/control_plane.go +++ b/controlplane/kubeadm/internal/control_plane.go @@ -51,10 +51,13 @@ type ControlPlane struct { // See discussion on https://github.com/kubernetes-sigs/cluster-api/pull/3405 KubeadmConfigs map[string]*bootstrapv1.KubeadmConfig InfraResources map[string]*unstructured.Unstructured + + managementCluster ManagementCluster + workloadCluster WorkloadCluster } // NewControlPlane returns an instantiated ControlPlane. -func NewControlPlane(ctx context.Context, client client.Client, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, ownedMachines collections.Machines) (*ControlPlane, error) { +func NewControlPlane(ctx context.Context, managementCluster ManagementCluster, client client.Client, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, ownedMachines collections.Machines) (*ControlPlane, error) { infraObjects, err := getInfraResources(ctx, client, ownedMachines) if err != nil { return nil, err @@ -67,7 +70,7 @@ func NewControlPlane(ctx context.Context, client client.Client, cluster *cluster for _, machine := range ownedMachines { patchHelper, err := patch.NewHelper(machine, client) if err != nil { - return nil, errors.Wrapf(err, "failed to create patch helper for machine %s", machine.Name) + return nil, err } patchHelpers[machine.Name] = patchHelper } @@ -80,6 +83,7 @@ func NewControlPlane(ctx context.Context, client client.Client, cluster *cluster KubeadmConfigs: kubeadmConfigs, InfraResources: infraObjects, reconciliationTime: metav1.Now(), + managementCluster: managementCluster, }, nil } @@ -92,8 +96,8 @@ func (c *ControlPlane) FailureDomains() clusterv1.FailureDomains { } // MachineInFailureDomainWithMostMachines returns the first matching failure domain with machines that has the most control-plane machines on it. 
-func (c *ControlPlane) MachineInFailureDomainWithMostMachines(machines collections.Machines) (*clusterv1.Machine, error) { - fd := c.FailureDomainWithMostMachines(machines) +func (c *ControlPlane) MachineInFailureDomainWithMostMachines(ctx context.Context, machines collections.Machines) (*clusterv1.Machine, error) { + fd := c.FailureDomainWithMostMachines(ctx, machines) machinesInFailureDomain := machines.Filter(collections.InFailureDomains(fd)) machineToMark := machinesInFailureDomain.Oldest() if machineToMark == nil { @@ -112,7 +116,7 @@ func (c *ControlPlane) MachineWithDeleteAnnotation(machines collections.Machines // FailureDomainWithMostMachines returns a fd which exists both in machines and control-plane machines and has the most // control-plane machines on it. -func (c *ControlPlane) FailureDomainWithMostMachines(machines collections.Machines) *string { +func (c *ControlPlane) FailureDomainWithMostMachines(ctx context.Context, machines collections.Machines) *string { // See if there are any Machines that are not in currently defined failure domains first. notInFailureDomains := machines.Filter( collections.Not(collections.InFailureDomains(c.FailureDomains().FilterControlPlane().GetIDs()...)), @@ -123,15 +127,19 @@ func (c *ControlPlane) FailureDomainWithMostMachines(machines collections.Machin // in the cluster status. return notInFailureDomains.Oldest().Spec.FailureDomain } - return failuredomains.PickMost(c.Cluster.Status.FailureDomains.FilterControlPlane(), c.Machines, machines) + return failuredomains.PickMost(ctx, c.Cluster.Status.FailureDomains.FilterControlPlane(), c.Machines, machines) } // NextFailureDomainForScaleUp returns the failure domain with the fewest number of up-to-date machines. -func (c *ControlPlane) NextFailureDomainForScaleUp() *string { +func (c *ControlPlane) NextFailureDomainForScaleUp(ctx context.Context) (*string, error) { if len(c.Cluster.Status.FailureDomains.FilterControlPlane()) == 0 { - return nil + return nil, nil + } + upToDateMachines, err := c.UpToDateMachines() + if err != nil { + return nil, errors.Wrapf(err, "failed to determine next failure domain for scale up") } - return failuredomains.PickFewest(c.FailureDomains().FilterControlPlane(), c.UpToDateMachines()) + return failuredomains.PickFewest(ctx, c.FailureDomains().FilterControlPlane(), upToDateMachines), nil } // InitialControlPlaneConfig returns a new KubeadmConfigSpec that is to be used for an initializing control plane. @@ -163,22 +171,40 @@ func (c *ControlPlane) GetKubeadmConfig(machineName string) (*bootstrapv1.Kubead } // MachinesNeedingRollout return a list of machines that need to be rolled out. -func (c *ControlPlane) MachinesNeedingRollout() collections.Machines { +func (c *ControlPlane) MachinesNeedingRollout() (collections.Machines, map[string]string, error) { // Ignore machines to be deleted. machines := c.Machines.Filter(collections.Not(collections.HasDeletionTimestamp)) // Return machines if they are scheduled for rollout or if with an outdated configuration. 
- return machines.Filter( - NeedsRollout(&c.reconciliationTime, c.KCP.Spec.RolloutAfter, c.KCP.Spec.RolloutBefore, c.InfraResources, c.KubeadmConfigs, c.KCP), - ) + machinesNeedingRollout := make(collections.Machines, len(machines)) + rolloutReasons := map[string]string{} + for _, m := range machines { + reason, needsRollout, err := NeedsRollout(&c.reconciliationTime, c.KCP.Spec.RolloutAfter, c.KCP.Spec.RolloutBefore, c.InfraResources, c.KubeadmConfigs, c.KCP, m) + if err != nil { + return nil, nil, err + } + if needsRollout { + machinesNeedingRollout.Insert(m) + rolloutReasons[m.Name] = reason + } + } + return machinesNeedingRollout, rolloutReasons, nil } // UpToDateMachines returns the machines that are up to date with the control // plane's configuration and therefore do not require rollout. -func (c *ControlPlane) UpToDateMachines() collections.Machines { - return c.Machines.Filter( - collections.Not(NeedsRollout(&c.reconciliationTime, c.KCP.Spec.RolloutAfter, c.KCP.Spec.RolloutBefore, c.InfraResources, c.KubeadmConfigs, c.KCP)), - ) +func (c *ControlPlane) UpToDateMachines() (collections.Machines, error) { + upToDateMachines := make(collections.Machines, len(c.Machines)) + for _, m := range c.Machines { + _, needsRollout, err := NeedsRollout(&c.reconciliationTime, c.KCP.Spec.RolloutAfter, c.KCP.Spec.RolloutBefore, c.InfraResources, c.KubeadmConfigs, c.KCP, m) + if err != nil { + return nil, err + } + if !needsRollout { + upToDateMachines.Insert(m) + } + } + return upToDateMachines, nil } // getInfraResources fetches the external infrastructure resource for each machine in the collection and returns a map of machine.Name -> infraResource. @@ -222,19 +248,31 @@ func (c *ControlPlane) IsEtcdManaged() bool { return c.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration == nil || c.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.External == nil } -// UnhealthyMachines returns the list of control plane machines marked as unhealthy by MHC. -func (c *ControlPlane) UnhealthyMachines() collections.Machines { +// UnhealthyMachinesWithUnhealthyControlPlaneComponents returns all unhealthy control plane machines that +// have unhealthy control plane components. +// It differs from UnhealthyMachinesByHealthCheck which checks `MachineHealthCheck` conditions. +func (c *ControlPlane) UnhealthyMachinesWithUnhealthyControlPlaneComponents(machines collections.Machines) collections.Machines { + return machines.Filter(collections.HasUnhealthyControlPlaneComponents(c.IsEtcdManaged())) +} + +// UnhealthyMachinesByMachineHealthCheck returns the list of control plane machines marked as unhealthy by Machine Health Check. +func (c *ControlPlane) UnhealthyMachinesByMachineHealthCheck() collections.Machines { return c.Machines.Filter(collections.HasUnhealthyCondition) } -// HealthyMachines returns the list of control plane machines not marked as unhealthy by MHC. -func (c *ControlPlane) HealthyMachines() collections.Machines { +// HealthyMachinesByMachineHealthCheck returns the list of control plane machines not marked as unhealthy by Machine Health Check. +func (c *ControlPlane) HealthyMachinesByMachineHealthCheck() collections.Machines { return c.Machines.Filter(collections.Not(collections.HasUnhealthyCondition)) } -// HasUnhealthyMachine returns true if any machine in the control plane is marked as unhealthy by MHC. 
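
// The per-machine loop that MachinesNeedingRollout and UpToDateMachines now
// share, sketched generically. needsRollout stands in for the real
// NeedsRollout helper; its signature here is an assumption for illustration.
package sketch

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/collections"
)

func partitionByRollout(machines collections.Machines, needsRollout func(*clusterv1.Machine) (string, bool, error)) (collections.Machines, map[string]string, error) {
	outdated := make(collections.Machines, len(machines))
	reasons := map[string]string{}
	for _, m := range machines {
		reason, rollout, err := needsRollout(m)
		if err != nil {
			// Unlike a predicate-based Filter, errors can be propagated.
			return nil, nil, err
		}
		if rollout {
			outdated.Insert(m)
			reasons[m.Name] = reason
		}
	}
	return outdated, reasons, nil
}
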
-func (c *ControlPlane) HasUnhealthyMachine() bool { - return len(c.UnhealthyMachines()) > 0 +// HasUnhealthyMachineByMachineHealthCheck returns true if any machine in the control plane is marked as unhealthy by Machine Health Check. +func (c *ControlPlane) HasUnhealthyMachineByMachineHealthCheck() bool { + return len(c.UnhealthyMachinesByMachineHealthCheck()) > 0 +} + +// HasHealthyMachineStillProvisioning returns true if any healthy machine in the control plane is still in the process of being provisioned. +func (c *ControlPlane) HasHealthyMachineStillProvisioning() bool { + return len(c.HealthyMachinesByMachineHealthCheck().Filter(collections.Not(collections.HasNode()))) > 0 } // PatchMachines patches all the machines conditions. @@ -250,7 +288,7 @@ func (c *ControlPlane) PatchMachines(ctx context.Context) error { controlplanev1.MachineEtcdPodHealthyCondition, controlplanev1.MachineEtcdMemberHealthyCondition, }}); err != nil { - errList = append(errList, errors.Wrapf(err, "failed to patch machine %s", machine.Name)) + errList = append(errList, err) } continue } @@ -261,5 +299,35 @@ func (c *ControlPlane) PatchMachines(ctx context.Context) error { // SetPatchHelpers updates the patch helpers. func (c *ControlPlane) SetPatchHelpers(patchHelpers map[string]*patch.Helper) { - c.machinesPatchHelpers = patchHelpers + if c.machinesPatchHelpers == nil { + c.machinesPatchHelpers = map[string]*patch.Helper{} + } + for machineName, patchHelper := range patchHelpers { + c.machinesPatchHelpers[machineName] = patchHelper + } +} + +// GetWorkloadCluster builds a cluster object. +// The cluster comes with an etcd client generator to connect to any etcd pod living on a managed machine. +func (c *ControlPlane) GetWorkloadCluster(ctx context.Context) (WorkloadCluster, error) { + if c.workloadCluster != nil { + return c.workloadCluster, nil + } + + workloadCluster, err := c.managementCluster.GetWorkloadCluster(ctx, client.ObjectKeyFromObject(c.Cluster)) + if err != nil { + return nil, err + } + c.workloadCluster = workloadCluster + return c.workloadCluster, nil +} + +// InjectTestManagementCluster allows to inject a test ManagementCluster during tests. +// NOTE: This approach allows to keep the managementCluster field private, which will +// prevent people from using managementCluster.GetWorkloadCluster because it creates a new +// instance of WorkloadCluster at every call. People instead should use ControlPlane.GetWorkloadCluster +// that creates only a single instance of WorkloadCluster for each reconcile. +func (c *ControlPlane) InjectTestManagementCluster(managementCluster ManagementCluster) { + c.managementCluster = managementCluster + c.workloadCluster = nil } diff --git a/controlplane/kubeadm/internal/control_plane_test.go b/controlplane/kubeadm/internal/control_plane_test.go index 8f8f022052dc..6d6d50c82a4e 100644 --- a/controlplane/kubeadm/internal/control_plane_test.go +++ b/controlplane/kubeadm/internal/control_plane_test.go @@ -20,6 +20,7 @@ import ( "testing" . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -51,42 +52,103 @@ func TestControlPlane(t *testing.T) { }, } - t.Run("With all machines in known failure domain, should return the FD with most number of machines", func(t *testing.T) { - g.Expect(*controlPlane.FailureDomainWithMostMachines(controlPlane.Machines)).To(Equal("two")) + t.Run("With all machines in known failure domain, should return the FD with most number of machines", func(*testing.T) { + g.Expect(*controlPlane.FailureDomainWithMostMachines(ctx, controlPlane.Machines)).To(Equal("two")) }) - t.Run("With some machines in non defined failure domains", func(t *testing.T) { + t.Run("With some machines in non defined failure domains", func(*testing.T) { controlPlane.Machines.Insert(machine("machine-5", withFailureDomain("unknown"))) - g.Expect(*controlPlane.FailureDomainWithMostMachines(controlPlane.Machines)).To(Equal("unknown")) + g.Expect(*controlPlane.FailureDomainWithMostMachines(ctx, controlPlane.Machines)).To(Equal("unknown")) }) }) } func TestHasUnhealthyMachine(t *testing.T) { // healthy machine (without MachineHealthCheckSucceded condition) - healthyMachine1 := &clusterv1.Machine{} + healthyMachine1 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "healthyMachine1"}} // healthy machine (with MachineHealthCheckSucceded == true) - healthyMachine2 := &clusterv1.Machine{} + healthyMachine2 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "healthyMachine2"}} conditions.MarkTrue(healthyMachine2, clusterv1.MachineHealthCheckSucceededCondition) // unhealthy machine NOT eligible for KCP remediation (with MachineHealthCheckSucceded == False, but without MachineOwnerRemediated condition) - unhealthyMachineNOTOwnerRemediated := &clusterv1.Machine{} - conditions.MarkFalse(unhealthyMachineNOTOwnerRemediated, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "") + unhealthyMachineNOTOwnerRemediated := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "unhealthyMachineNOTOwnerRemediated"}} + conditions.MarkFalse(unhealthyMachineNOTOwnerRemediated, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "Something is wrong") // unhealthy machine eligible for KCP remediation (with MachineHealthCheckSucceded == False, with MachineOwnerRemediated condition) - unhealthyMachineOwnerRemediated := &clusterv1.Machine{} - conditions.MarkFalse(unhealthyMachineOwnerRemediated, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "") - conditions.MarkFalse(unhealthyMachineOwnerRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "") - - c := ControlPlane{ - Machines: collections.FromMachines( - healthyMachine1, - healthyMachine2, - unhealthyMachineNOTOwnerRemediated, - unhealthyMachineOwnerRemediated, - ), - } + unhealthyMachineOwnerRemediated := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "unhealthyMachineOwnerRemediated"}} + conditions.MarkFalse(unhealthyMachineOwnerRemediated, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "Something is wrong") + conditions.MarkFalse(unhealthyMachineOwnerRemediated, clusterv1.MachineOwnerRemediatedCondition, 
clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP should remediate this issue") + + t.Run("One unhealthy machine to be remediated by KCP", func(t *testing.T) { + c := ControlPlane{ + Machines: collections.FromMachines( + healthyMachine1, // healthy machine, should be ignored + healthyMachine2, // healthy machine, should be ignored (the MachineHealthCheckSucceededCondition is true) + unhealthyMachineNOTOwnerRemediated, // unhealthy machine, but KCP should not remediate it, should be ignored. + unhealthyMachineOwnerRemediated, + ), + } - g := NewWithT(t) - g.Expect(c.HasUnhealthyMachine()).To(BeTrue()) + g := NewWithT(t) + g.Expect(c.HasUnhealthyMachineByMachineHealthCheck()).To(BeTrue()) + }) + + t.Run("No unhealthy machine to be remediated by KCP", func(t *testing.T) { + c := ControlPlane{ + Machines: collections.FromMachines( + healthyMachine1, // healthy machine, should be ignored + healthyMachine2, // healthy machine, should be ignored (the MachineHealthCheckSucceededCondition is true) + unhealthyMachineNOTOwnerRemediated, // unhealthy machine, but KCP should not remediate it, should be ignored. + ), + } + + g := NewWithT(t) + g.Expect(c.HasUnhealthyMachineByMachineHealthCheck()).To(BeFalse()) + }) +} + +func TestHasHealthyMachineStillProvisioning(t *testing.T) { + // healthy machine (without MachineHealthCheckSucceeded condition) still provisioning (without NodeRef) + healthyMachineStillProvisioning1 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "healthyMachineStillProvisioning1"}} + + // healthy machine (without MachineHealthCheckSucceeded condition) provisioned (with NodeRef) + healthyMachineProvisioned1 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "healthyMachineProvisioned1"}} + healthyMachineProvisioned1.Status.NodeRef = &corev1.ObjectReference{} + + // unhealthy machine (with MachineHealthCheckSucceeded == False) still provisioning (without NodeRef) + unhealthyMachineStillProvisioning1 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "unhealthyMachineStillProvisioning1"}} + conditions.MarkFalse(unhealthyMachineStillProvisioning1, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "Something is wrong") + conditions.MarkFalse(unhealthyMachineStillProvisioning1, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP should remediate this issue") + + // unhealthy machine (with MachineHealthCheckSucceeded == False) provisioned (with NodeRef) + unhealthyMachineProvisioned1 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "unhealthyMachineProvisioned1"}} + unhealthyMachineProvisioned1.Status.NodeRef = &corev1.ObjectReference{} + conditions.MarkFalse(unhealthyMachineProvisioned1, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "Something is wrong") + conditions.MarkFalse(unhealthyMachineProvisioned1, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP should remediate this issue") + + t.Run("Healthy machine still provisioning", func(t *testing.T) { + c := ControlPlane{ + Machines: collections.FromMachines( + healthyMachineStillProvisioning1, + unhealthyMachineStillProvisioning1, // unhealthy, should be ignored + healthyMachineProvisioned1, // already provisioned, should be ignored + unhealthyMachineProvisioned1, // unhealthy and already
provisioned, should be ignored + ), + } + + g := NewWithT(t) + g.Expect(c.HasHealthyMachineStillProvisioning()).To(BeTrue()) + }) + t.Run("No machines still provisioning", func(t *testing.T) { + c := ControlPlane{ + Machines: collections.FromMachines( + unhealthyMachineStillProvisioning1, // unhealthy, should be ignored + healthyMachineProvisioned1, // already provisioned, should be ignored + unhealthyMachineProvisioned1, // unhealthy and already provisioned, should be ignored + ), + } + + g := NewWithT(t) + g.Expect(c.HasHealthyMachineStillProvisioning()).To(BeFalse()) + }) } type machineOpt func(*clusterv1.Machine) diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go index 56c0ae9a6a0c..7f76c63314c3 100644 --- a/controlplane/kubeadm/internal/controllers/controller.go +++ b/controlplane/kubeadm/internal/controllers/controller.go @@ -19,9 +19,10 @@ package controllers import ( "context" "fmt" + "strings" "time" - "github.com/blang/semver" + "github.com/blang/semver/v4" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -29,13 +30,13 @@ import ( kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/source" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" @@ -56,36 +57,38 @@ import ( "sigs.k8s.io/cluster-api/util/version" ) -const kcpManagerName = "capi-kubeadmcontrolplane" +const ( + kcpManagerName = "capi-kubeadmcontrolplane" + kubeadmControlPlaneKind = "KubeadmControlPlane" +) -// +kubebuilder:rbac:groups=core,resources=events,verbs=get;list;watch;create;patch +// +kubebuilder:rbac:groups=core,resources=events,verbs=create // +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;patch // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io;bootstrap.cluster.x-k8s.io;controlplane.cluster.x-k8s.io,resources=*,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinepools,verbs=get;list;watch // +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get;list;watch // KubeadmControlPlaneReconciler reconciles a KubeadmControlPlane object. type KubeadmControlPlaneReconciler struct { - Client client.Client - APIReader client.Reader - controller controller.Controller - recorder record.EventRecorder - Tracker *remote.ClusterCacheTracker + Client client.Client + SecretCachingClient client.Client + controller controller.Controller + recorder record.EventRecorder + Tracker *remote.ClusterCacheTracker + EtcdDialTimeout time.Duration EtcdCallTimeout time.Duration // WatchFilterValue is the label value used to filter events prior to reconciliation. WatchFilterValue string + // Deprecated: DeprecatedInfraMachineNaming. 
Name the InfraStructureMachines after the InfraMachineTemplate. + DeprecatedInfraMachineNaming bool + managementCluster internal.ManagementCluster managementClusterUncached internal.ManagementCluster - - // disableInPlacePropagation should only be used for tests. This is used to skip - // some parts of the controller that need SSA as the current test setup does not - // support SSA. This flag should be dropped after all affected tests are migrated - // to envtest. - disableInPlacePropagation bool ssaCache ssa.Cache } @@ -95,25 +98,22 @@ func (r *KubeadmControlPlaneReconciler) SetupWithManager(ctx context.Context, mg Owns(&clusterv1.Machine{}). WithOptions(options). WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). - Build(r) + Watches( + &clusterv1.Cluster{}, + handler.EnqueueRequestsFromMapFunc(r.ClusterToKubeadmControlPlane), + builder.WithPredicates( + predicates.All(ctrl.LoggerFrom(ctx), + predicates.ResourceHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue), + predicates.ClusterUnpausedAndInfrastructureReady(ctrl.LoggerFrom(ctx)), + ), + ), + ).Build(r) if err != nil { return errors.Wrap(err, "failed setting up with a controller manager") } - err = c.Watch( - &source.Kind{Type: &clusterv1.Cluster{}}, - handler.EnqueueRequestsFromMapFunc(r.ClusterToKubeadmControlPlane), - predicates.All(ctrl.LoggerFrom(ctx), - predicates.ResourceHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue), - predicates.ClusterUnpausedAndInfrastructureReady(ctrl.LoggerFrom(ctx)), - ), - ) - if err != nil { - return errors.Wrap(err, "failed adding Watch for Clusters to controller manager") - } - r.controller = c - r.recorder = mgr.GetEventRecorderFor("kubeadm-control-plane-controller") + r.recorder = mgr.GetEventRecorderFor("kubeadmcontrolplane-controller") r.ssaCache = ssa.NewCache() if r.managementCluster == nil { @@ -121,10 +121,11 @@ func (r *KubeadmControlPlaneReconciler) SetupWithManager(ctx context.Context, mg return errors.New("cluster cache tracker is nil, cannot create the internal management cluster resource") } r.managementCluster = &internal.Management{ - Client: r.Client, - Tracker: r.Tracker, - EtcdDialTimeout: r.EtcdDialTimeout, - EtcdCallTimeout: r.EtcdCallTimeout, + Client: r.Client, + SecretCachingClient: r.SecretCachingClient, + Tracker: r.Tracker, + EtcdDialTimeout: r.EtcdDialTimeout, + EtcdCallTimeout: r.EtcdCallTimeout, } } @@ -172,8 +173,9 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl. return ctrl.Result{Requeue: true}, nil } - // Add finalizer first if not exist to avoid the race condition between init and delete - if !controllerutil.ContainsFinalizer(kcp, controlplanev1.KubeadmControlPlaneFinalizer) { + // Add finalizer first if not set to avoid the race condition between init and delete. + // Note: Finalizers in general can only be added when the deletionTimestamp is not set. + if kcp.ObjectMeta.DeletionTimestamp.IsZero() && !controllerutil.ContainsFinalizer(kcp, controlplanev1.KubeadmControlPlaneFinalizer) { controllerutil.AddFinalizer(kcp, controlplanev1.KubeadmControlPlaneFinalizer) // patch and return right away instead of reusing the main defer, @@ -181,21 +183,32 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl. 
// Patch ObservedGeneration only if the reconciliation completed successfully patchOpts := []patch.Option{patch.WithStatusObservedGeneration{}} if err := patchHelper.Patch(ctx, kcp, patchOpts...); err != nil { - log.Error(err, "Failed to patch KubeadmControlPlane to add finalizer") - return ctrl.Result{}, err + return ctrl.Result{}, errors.Wrapf(err, "failed to add finalizer") } return ctrl.Result{}, nil } + // Initialize the control plane scope; this includes also checking for orphan machines and + // adopt them if necessary. + controlPlane, adoptableMachineFound, err := r.initControlPlaneScope(ctx, cluster, kcp) + if err != nil { + return ctrl.Result{}, err + } + if adoptableMachineFound { + // if there are no errors but at least one CP machine has been adopted, then requeue and + // wait for the update event for the ownership to be set. + return ctrl.Result{}, nil + } + defer func() { // Always attempt to update status. - if err := r.updateStatus(ctx, kcp, cluster); err != nil { + if err := r.updateStatus(ctx, controlPlane); err != nil { var connFailure *internal.RemoteClusterConnectionError if errors.As(err, &connFailure) { - log.Info("Could not connect to workload cluster to fetch status", "err", err.Error()) + log.Error(err, "Could not connect to workload cluster to fetch status") } else { - log.Error(err, "Failed to update KubeadmControlPlane Status") + log.Error(err, "Failed to update KubeadmControlPlane status") reterr = kerrors.NewAggregate([]error{reterr, err}) } } @@ -206,39 +219,94 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl. reterr = kerrors.NewAggregate([]error{reterr, err}) } - // TODO: remove this as soon as we have a proper remote cluster cache in place. - // Make KCP to requeue in case status is not ready, so we can check for node status without waiting for a full resync (by default 10 minutes). - // Only requeue if we are not going in exponential backoff due to error, or if we are not already re-queueing, or if the object has a deletion timestamp. - if reterr == nil && !res.Requeue && res.RequeueAfter <= 0 && kcp.ObjectMeta.DeletionTimestamp.IsZero() { + // Only requeue if there is no error, Requeue or RequeueAfter and the object does not have a deletion timestamp. + if reterr == nil && res.IsZero() && kcp.ObjectMeta.DeletionTimestamp.IsZero() { + // Make KCP requeue in case node status is not ready, so we can check for node status without waiting for a full + // resync (by default 10 minutes). + // The alternative solution would be to watch the control plane nodes in the Cluster - similar to how the + // MachineSet and MachineHealthCheck controllers watch the nodes under their control. if !kcp.Status.Ready { res = ctrl.Result{RequeueAfter: 20 * time.Second} } + + // Make KCP requeue if ControlPlaneComponentsHealthyCondition is false so we can check for control plane component + // status without waiting for a full resync (by default 10 minutes). + // Otherwise this condition can lead to a delay in provisioning MachineDeployments when MachineSet preflight checks are enabled. + // The alternative solution to this requeue would be watching the relevant pods inside each workload cluster which would be very expensive. + if conditions.IsFalse(kcp, controlplanev1.ControlPlaneComponentsHealthyCondition) { + res = ctrl.Result{RequeueAfter: 20 * time.Second} + } } }() if !kcp.ObjectMeta.DeletionTimestamp.IsZero() { // Handle deletion reconciliation loop. 
- res, err = r.reconcileDelete(ctx, cluster, kcp) + res, err = r.reconcileDelete(ctx, controlPlane) // Requeue if the reconcile failed because the ClusterCacheTracker was locked for // the current cluster due to concurrent access. if errors.Is(err, remote.ErrClusterLocked) { log.V(5).Info("Requeuing because another worker has the lock on the ClusterCacheTracker") - return ctrl.Result{Requeue: true}, nil + return ctrl.Result{RequeueAfter: time.Minute}, nil } return res, err } // Handle normal reconciliation loop. - res, err = r.reconcile(ctx, cluster, kcp) + res, err = r.reconcile(ctx, controlPlane) // Requeue if the reconcile failed because the ClusterCacheTracker was locked for // the current cluster due to concurrent access. if errors.Is(err, remote.ErrClusterLocked) { log.V(5).Info("Requeuing because another worker has the lock on the ClusterCacheTracker") - return ctrl.Result{Requeue: true}, nil + return ctrl.Result{RequeueAfter: time.Minute}, nil } return res, err } +// initControlPlaneScope initializes the control plane scope; this also includes checking for orphan machines and +// adopting them if necessary. +// The func also returns a boolean indicating if adoptable machines have been found and processed, but this doesn't imply those machines +// have actually been adopted. +func (r *KubeadmControlPlaneReconciler) initControlPlaneScope(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane) (*internal.ControlPlane, bool, error) { + log := ctrl.LoggerFrom(ctx) + + // Return early if the cluster is not yet in a state where control plane machines exist + if !cluster.Status.InfrastructureReady || !cluster.Spec.ControlPlaneEndpoint.IsValid() { + controlPlane, err := internal.NewControlPlane(ctx, r.managementCluster, r.Client, cluster, kcp, collections.Machines{}) + if err != nil { + log.Error(err, "Failed to initialize control plane scope") + return nil, false, err + } + return controlPlane, false, nil + } + + // Read control plane machines + controlPlaneMachines, err := r.managementClusterUncached.GetMachinesForCluster(ctx, cluster, collections.ControlPlaneMachines(cluster.Name)) + if err != nil { + log.Error(err, "Failed to retrieve control plane machines for cluster") + return nil, false, err + } + + // If we are not deleting the CP, adopt standalone CP machines, if any + adoptableMachines := controlPlaneMachines.Filter(collections.AdoptableControlPlaneMachines(cluster.Name)) + if kcp.ObjectMeta.DeletionTimestamp.IsZero() && len(adoptableMachines) > 0 { + return nil, true, r.adoptMachines(ctx, kcp, adoptableMachines, cluster) + } + + ownedMachines := controlPlaneMachines.Filter(collections.OwnedMachines(kcp)) + if kcp.ObjectMeta.DeletionTimestamp.IsZero() && len(ownedMachines) != len(controlPlaneMachines) { + err := errors.New("not all control plane machines are owned by this KubeadmControlPlane, refusing to operate in mixed management mode") + log.Error(err, "KCP cannot reconcile") + return nil, false, err + } + + controlPlane, err := internal.NewControlPlane(ctx, r.managementCluster, r.Client, cluster, kcp, ownedMachines) + if err != nil { + log.Error(err, "Failed to initialize control plane scope") + return nil, false, err + } + return controlPlane, false, nil +} +
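To make the adoption flow above easier to follow, here is a small, runnable sketch of the partitioning that initControlPlaneScope performs. The machine type and partition function are hypothetical stand-ins for collections.Machines and its filters, not the real cluster-api API:

package main

import (
	"errors"
	"fmt"
)

// machine is a stand-in for clusterv1.Machine: a name plus the name of the
// KCP object that owns it ("" means the machine is adoptable).
type machine struct {
	name  string
	owner string
}

// partition mirrors the filtering above: machines not owned by any KCP are
// candidates for adoption; once no adoption is pending, every control plane
// machine must be owned by this KCP or reconciliation refuses to operate in
// mixed management mode.
func partition(kcpName string, machines []machine) (owned, adoptable []machine, err error) {
	for _, m := range machines {
		switch m.owner {
		case "":
			adoptable = append(adoptable, m)
		case kcpName:
			owned = append(owned, m)
		}
	}
	if len(adoptable) == 0 && len(owned) != len(machines) {
		return nil, nil, errors.New("refusing to operate in mixed management mode")
	}
	return owned, adoptable, nil
}

func main() {
	machines := []machine{{"m1", "kcp-a"}, {"m2", ""}, {"m3", "kcp-a"}}
	owned, adoptable, err := partition("kcp-a", machines)
	fmt.Println(len(owned), len(adoptable), err) // 2 1 <nil>: m2 would be adopted first
}

In the real controller, the adoptable branch returns early so that the ownership update re-triggers reconciliation with an up-to-date cache; the mixed-management guard only fires when no adoption is pending.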
func patchKubeadmControlPlane(ctx context.Context, patchHelper *patch.Helper, kcp *controlplanev1.KubeadmControlPlane) error { // Always update the readyCondition by summarizing the state of other conditions. conditions.SetSummary(kcp, @@ -270,98 +338,58 @@ func patchKubeadmControlPlane(ctx context.Context, patchHelper *patch.Helper, kc } // reconcile handles KubeadmControlPlane reconciliation. -func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane) (res ctrl.Result, reterr error) { +func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, controlPlane *internal.ControlPlane) (res ctrl.Result, reterr error) { log := ctrl.LoggerFrom(ctx) log.Info("Reconcile KubeadmControlPlane") // Make sure to reconcile the external infrastructure reference. - if err := r.reconcileExternalReference(ctx, cluster, &kcp.Spec.MachineTemplate.InfrastructureRef); err != nil { + if err := r.reconcileExternalReference(ctx, controlPlane.Cluster, &controlPlane.KCP.Spec.MachineTemplate.InfrastructureRef); err != nil { return ctrl.Result{}, err } // Wait for the cluster infrastructure to be ready before creating machines - if !cluster.Status.InfrastructureReady { + if !controlPlane.Cluster.Status.InfrastructureReady { log.Info("Cluster infrastructure is not ready yet") return ctrl.Result{}, nil } - // Generate Cluster Certificates if needed - config := kcp.Spec.KubeadmConfigSpec.DeepCopy() - config.JoinConfiguration = nil - if config.ClusterConfiguration == nil { - config.ClusterConfiguration = &bootstrapv1.ClusterConfiguration{} - } - certificates := secret.NewCertificatesForInitialControlPlane(config.ClusterConfiguration) - controllerRef := metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane")) - if err := certificates.LookupOrGenerate(ctx, r.Client, util.ObjectKey(cluster), *controllerRef); err != nil { - log.Error(err, "unable to lookup or create cluster certificates") - conditions.MarkFalse(kcp, controlplanev1.CertificatesAvailableCondition, controlplanev1.CertificatesGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + // Reconcile cluster certificates.
+ if err := r.reconcileClusterCertificates(ctx, controlPlane); err != nil { return ctrl.Result{}, err } - conditions.MarkTrue(kcp, controlplanev1.CertificatesAvailableCondition) // If ControlPlaneEndpoint is not set, return early - if !cluster.Spec.ControlPlaneEndpoint.IsValid() { + if !controlPlane.Cluster.Spec.ControlPlaneEndpoint.IsValid() { log.Info("Cluster does not yet have a ControlPlaneEndpoint defined") return ctrl.Result{}, nil } // Generate Cluster Kubeconfig if needed - if result, err := r.reconcileKubeconfig(ctx, cluster, kcp); !result.IsZero() || err != nil { + if result, err := r.reconcileKubeconfig(ctx, controlPlane); !result.IsZero() || err != nil { if err != nil { - log.Error(err, "failed to reconcile Kubeconfig") + log.Error(err, "Failed to reconcile Kubeconfig") } return result, err } - controlPlaneMachines, err := r.managementClusterUncached.GetMachinesForCluster(ctx, cluster, collections.ControlPlaneMachines(cluster.Name)) - if err != nil { - log.Error(err, "failed to retrieve control plane machines for cluster") - return ctrl.Result{}, err - } - - adoptableMachines := controlPlaneMachines.Filter(collections.AdoptableControlPlaneMachines(cluster.Name)) - if len(adoptableMachines) > 0 { - // We adopt the Machines and then wait for the update event for the ownership reference to re-queue them so the cache is up-to-date - err = r.adoptMachines(ctx, kcp, adoptableMachines, cluster) - return ctrl.Result{}, err - } - if err := ensureCertificatesOwnerRef(ctx, r.Client, util.ObjectKey(cluster), certificates, *controllerRef); err != nil { - return ctrl.Result{}, err - } - - ownedMachines := controlPlaneMachines.Filter(collections.OwnedMachines(kcp)) - if len(ownedMachines) != len(controlPlaneMachines) { - log.Info("Not all control plane machines are owned by this KubeadmControlPlane, refusing to operate in mixed management mode") - return ctrl.Result{}, nil - } - - controlPlane, err := internal.NewControlPlane(ctx, r.Client, cluster, kcp, ownedMachines) - if err != nil { - log.Error(err, "failed to initialize control plane") - return ctrl.Result{}, err - } - - if !r.disableInPlacePropagation { - if err := r.syncMachines(ctx, controlPlane); err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to sync Machines") - } + if err := r.syncMachines(ctx, controlPlane); err != nil { + return ctrl.Result{}, errors.Wrap(err, "failed to sync Machines") } // Aggregate the operational state of all the machines; while aggregating we are adding the // source ref (reason@machine/name) so the problem can be easily tracked down to its source machine. - conditions.SetAggregate(controlPlane.KCP, controlplanev1.MachinesReadyCondition, ownedMachines.ConditionGetters(), conditions.AddSourceRef(), conditions.WithStepCounterIf(false)) + conditions.SetAggregate(controlPlane.KCP, controlplanev1.MachinesReadyCondition, controlPlane.Machines.ConditionGetters(), conditions.AddSourceRef()) // Updates conditions reporting the status of static pods and the status of the etcd cluster. // NOTE: Conditions reporting KCP operation progress like e.g. Resized or SpecUpToDate are inlined with the rest of the execution. - if result, err := r.reconcileControlPlaneConditions(ctx, controlPlane); err != nil || !result.IsZero() { - return result, err + if err := r.reconcileControlPlaneConditions(ctx, controlPlane); err != nil { + return ctrl.Result{}, err } // Ensures the number of etcd members is in sync with the number of machines/nodes. // NOTE: This is usually required after a machine deletion. 
- if result, err := r.reconcileEtcdMembers(ctx, controlPlane); err != nil || !result.IsZero() { - return result, err + if err := r.reconcileEtcdMembers(ctx, controlPlane); err != nil { + return ctrl.Result{}, err } // Reconcile unhealthy machines by triggering deletion and requeue if it is considered safe to remediate, @@ -370,18 +398,20 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster * return result, err } - // Reconcile certificate expiry for machines that don't have the expiry annotation on KubeadmConfig yet. - if result, err := r.reconcileCertificateExpiries(ctx, controlPlane); err != nil || !result.IsZero() { - return result, err - } - // Control plane machines rollout due to configuration changes (e.g. upgrades) takes precedence over other operations. - needRollout := controlPlane.MachinesNeedingRollout() + machinesNeedingRollout, rolloutReasons, err := controlPlane.MachinesNeedingRollout() + if err != nil { + return ctrl.Result{}, err + } switch { - case len(needRollout) > 0: - log.Info("Rolling out Control Plane machines", "needRollout", needRollout.Names()) - conditions.MarkFalse(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateCondition, controlplanev1.RollingUpdateInProgressReason, clusterv1.ConditionSeverityWarning, "Rolling %d replicas with outdated spec (%d replicas up to date)", len(needRollout), len(controlPlane.Machines)-len(needRollout)) - return r.upgradeControlPlane(ctx, cluster, kcp, controlPlane, needRollout) + case len(machinesNeedingRollout) > 0: + var reasons []string + for _, rolloutReason := range rolloutReasons { + reasons = append(reasons, rolloutReason) + } + log.Info(fmt.Sprintf("Rolling out Control Plane machines: %s", strings.Join(reasons, ",")), "machinesNeedingRollout", machinesNeedingRollout.Names()) + conditions.MarkFalse(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateCondition, controlplanev1.RollingUpdateInProgressReason, clusterv1.ConditionSeverityWarning, "Rolling %d replicas with outdated spec (%d replicas up to date)", len(machinesNeedingRollout), len(controlPlane.Machines)-len(machinesNeedingRollout)) + return r.upgradeControlPlane(ctx, controlPlane, machinesNeedingRollout) default: // make sure last upgrade operation is marked as completed. 
// NOTE: we are checking the condition already exists in order to avoid to set this condition at the first @@ -392,30 +422,30 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster * } // If we've made it this far, we can assume that all ownedMachines are up to date - numMachines := len(ownedMachines) - desiredReplicas := int(*kcp.Spec.Replicas) + numMachines := len(controlPlane.Machines) + desiredReplicas := int(*controlPlane.KCP.Spec.Replicas) switch { // We are creating the first replica case numMachines < desiredReplicas && numMachines == 0: // Create new Machine w/ init - log.Info("Initializing control plane", "Desired", desiredReplicas, "Existing", numMachines) + log.Info("Initializing control plane", "desired", desiredReplicas, "existing", numMachines) conditions.MarkFalse(controlPlane.KCP, controlplanev1.AvailableCondition, controlplanev1.WaitingForKubeadmInitReason, clusterv1.ConditionSeverityInfo, "") - return r.initializeControlPlane(ctx, cluster, kcp, controlPlane) + return r.initializeControlPlane(ctx, controlPlane) // We are scaling up case numMachines < desiredReplicas && numMachines > 0: // Create a new Machine w/ join - log.Info("Scaling up control plane", "Desired", desiredReplicas, "Existing", numMachines) - return r.scaleUpControlPlane(ctx, cluster, kcp, controlPlane) + log.Info("Scaling up control plane", "desired", desiredReplicas, "existing", numMachines) + return r.scaleUpControlPlane(ctx, controlPlane) // We are scaling down case numMachines > desiredReplicas: - log.Info("Scaling down control plane", "Desired", desiredReplicas, "Existing", numMachines) + log.Info("Scaling down control plane", "desired", desiredReplicas, "existing", numMachines) // The last parameter (i.e. machines needing to be rolled out) should always be empty here. - return r.scaleDownControlPlane(ctx, cluster, kcp, controlPlane, collections.Machines{}) + return r.scaleDownControlPlane(ctx, controlPlane, collections.Machines{}) } // Get the workload cluster client. - workloadCluster, err := r.managementCluster.GetWorkloadCluster(ctx, util.ObjectKey(cluster)) + workloadCluster, err := controlPlane.GetWorkloadCluster(ctx) if err != nil { log.V(2).Info("cannot get remote client to workload cluster, will requeue", "cause", err) return ctrl.Result{Requeue: true}, nil @@ -428,80 +458,107 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster * // We intentionally only parse major/minor/patch so that the subsequent code // also already applies to beta versions of new releases. - parsedVersion, err := version.ParseMajorMinorPatchTolerant(kcp.Spec.Version) + parsedVersion, err := version.ParseMajorMinorPatchTolerant(controlPlane.KCP.Spec.Version) if err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to parse kubernetes version %q", kcp.Spec.Version) + return ctrl.Result{}, errors.Wrapf(err, "failed to parse kubernetes version %q", controlPlane.KCP.Spec.Version) } // Update kube-proxy daemonset. - if err := workloadCluster.UpdateKubeProxyImageInfo(ctx, kcp, parsedVersion); err != nil { - log.Error(err, "failed to update kube-proxy daemonset") + if err := workloadCluster.UpdateKubeProxyImageInfo(ctx, controlPlane.KCP, parsedVersion); err != nil { + log.Error(err, "Failed to update kube-proxy daemonset") return ctrl.Result{}, err } // Update CoreDNS deployment. 
- if err := workloadCluster.UpdateCoreDNS(ctx, kcp, parsedVersion); err != nil { + if err := workloadCluster.UpdateCoreDNS(ctx, controlPlane.KCP, parsedVersion); err != nil { return ctrl.Result{}, errors.Wrap(err, "failed to update CoreDNS deployment") } + // Reconcile certificate expiry for Machines that don't have the expiry annotation on KubeadmConfig yet. + // Note: This requires that all control plane machines are working. We moved this to the end of the reconcile + // as nothing in the same reconcile depends on it and to ensure it doesn't block anything else, + // especially MHC remediation and rollout of changes to recover the control plane. + if err := r.reconcileCertificateExpiries(ctx, controlPlane); err != nil { + return ctrl.Result{}, err + } return ctrl.Result{}, nil } +// reconcileClusterCertificates ensures that all the cluster certificates exist and +// enforces all the expected owner refs on them. +func (r *KubeadmControlPlaneReconciler) reconcileClusterCertificates(ctx context.Context, controlPlane *internal.ControlPlane) error { + log := ctrl.LoggerFrom(ctx) + + // Generate Cluster Certificates if needed + config := controlPlane.KCP.Spec.KubeadmConfigSpec.DeepCopy() + config.JoinConfiguration = nil + if config.ClusterConfiguration == nil { + config.ClusterConfiguration = &bootstrapv1.ClusterConfiguration{} + } + certificates := secret.NewCertificatesForInitialControlPlane(config.ClusterConfiguration) + controllerRef := metav1.NewControllerRef(controlPlane.KCP, controlplanev1.GroupVersion.WithKind(kubeadmControlPlaneKind)) + if err := certificates.LookupOrGenerateCached(ctx, r.SecretCachingClient, r.Client, util.ObjectKey(controlPlane.Cluster), *controllerRef); err != nil { + log.Error(err, "Unable to lookup or create cluster certificates") + conditions.MarkFalse(controlPlane.KCP, controlplanev1.CertificatesAvailableCondition, controlplanev1.CertificatesGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + return err + } + + if err := r.ensureCertificatesOwnerRef(ctx, certificates, *controllerRef); err != nil { + return err + } + + conditions.MarkTrue(controlPlane.KCP, controlplanev1.CertificatesAvailableCondition) + return nil +} +
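The switch from LookupOrGenerate to LookupOrGenerateCached pairs the cache-backed SecretCachingClient with the regular client. The following sketch shows only the general cached-read-with-live-fallback idea; it is an assumption based on the call's signature and the new SecretCachingClient field, not the verbatim cluster-api implementation:

package main

import (
	"context"
	"errors"
	"fmt"
)

// getter is a stand-in for the read path of a controller-runtime client.
type getter func(ctx context.Context, name string) ([]byte, error)

var errNotFound = errors.New("not found")

// lookupCached reads from the cache-backed client first and falls back to the
// live client on a miss, so hot-path secret reads (cluster CA, etcd CA, ...)
// don't hit the API server on every reconcile.
func lookupCached(ctx context.Context, cached, live getter, name string) ([]byte, error) {
	if b, err := cached(ctx, name); err == nil {
		return b, nil
	} else if !errors.Is(err, errNotFound) {
		return nil, err
	}
	return live(ctx, name) // cache miss: fall back to a direct read
}

func main() {
	cache := map[string][]byte{"ca": []byte("cached-ca")}
	cached := func(_ context.Context, name string) ([]byte, error) {
		if b, ok := cache[name]; ok {
			return b, nil
		}
		return nil, errNotFound
	}
	live := func(_ context.Context, name string) ([]byte, error) {
		return []byte("live-" + name), nil
	}
	a, _ := lookupCached(context.Background(), cached, live, "ca")
	b, _ := lookupCached(context.Background(), cached, live, "etcd")
	fmt.Printf("%s %s\n", a, b) // cached-ca live-etcd
}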
// reconcileDelete handles KubeadmControlPlane deletion. // The implementation does not take non-control plane workloads into consideration. This may or may not change in the future. // Please see https://github.com/kubernetes-sigs/cluster-api/issues/2064. -func (r *KubeadmControlPlaneReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane) (ctrl.Result, error) { +func (r *KubeadmControlPlaneReconciler) reconcileDelete(ctx context.Context, controlPlane *internal.ControlPlane) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) log.Info("Reconcile KubeadmControlPlane deletion") - // Gets all machines, not just control plane machines. - allMachines, err := r.managementCluster.GetMachinesForCluster(ctx, cluster) - if err != nil { - return ctrl.Result{}, err - } - ownedMachines := allMachines.Filter(collections.OwnedMachines(kcp)) - // If no control plane machines remain, remove the finalizer - if len(ownedMachines) == 0 { - controllerutil.RemoveFinalizer(kcp, controlplanev1.KubeadmControlPlaneFinalizer) + if len(controlPlane.Machines) == 0 { + controllerutil.RemoveFinalizer(controlPlane.KCP, controlplanev1.KubeadmControlPlaneFinalizer) return ctrl.Result{}, nil } - controlPlane, err := internal.NewControlPlane(ctx, r.Client, cluster, kcp, ownedMachines) - if err != nil { - log.Error(err, "failed to initialize control plane") - return ctrl.Result{}, err - } - // Updates conditions reporting the status of static pods and the status of the etcd cluster. // NOTE: Ignoring failures given that we are deleting - if _, err := r.reconcileControlPlaneConditions(ctx, controlPlane); err != nil { - log.Info("failed to reconcile conditions", "error", err.Error()) + if err := r.reconcileControlPlaneConditions(ctx, controlPlane); err != nil { + log.Error(err, "Failed to reconcile conditions") } // Aggregate the operational state of all the machines; while aggregating we are adding the // source ref (reason@machine/name) so the problem can be easily tracked down to its source machine. // However, during delete we are hiding the counter (1 of x) because it does not make sense given that // all the machines are deleted in parallel. - conditions.SetAggregate(kcp, controlplanev1.MachinesReadyCondition, ownedMachines.ConditionGetters(), conditions.AddSourceRef(), conditions.WithStepCounterIf(false)) + conditions.SetAggregate(controlPlane.KCP, controlplanev1.MachinesReadyCondition, controlPlane.Machines.ConditionGetters(), conditions.AddSourceRef()) + + // Gets all machines, not just control plane machines. + allMachines, err := r.managementCluster.GetMachinesForCluster(ctx, controlPlane.Cluster) + if err != nil { + return ctrl.Result{}, err + } allMachinePools := &expv1.MachinePoolList{} // Get all machine pools.
if feature.Gates.Enabled(feature.MachinePool) { - allMachinePools, err = r.managementCluster.GetMachinePoolsForCluster(ctx, cluster) + allMachinePools, err = r.managementCluster.GetMachinePoolsForCluster(ctx, controlPlane.Cluster) if err != nil { return ctrl.Result{}, err } } // Verify that only control plane machines remain - if len(allMachines) != len(ownedMachines) || len(allMachinePools.Items) != 0 { + if len(allMachines) != len(controlPlane.Machines) || len(allMachinePools.Items) != 0 { log.Info("Waiting for worker nodes to be deleted first") - conditions.MarkFalse(kcp, controlplanev1.ResizedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "Waiting for worker nodes to be deleted first") + conditions.MarkFalse(controlPlane.KCP, controlplanev1.ResizedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "Waiting for worker nodes to be deleted first") return ctrl.Result{RequeueAfter: deleteRequeueAfter}, nil } // Delete control plane machines in parallel - machinesToDelete := ownedMachines.Filter(collections.Not(collections.HasDeletionTimestamp)) + machinesToDelete := controlPlane.Machines.Filter(collections.Not(collections.HasDeletionTimestamp)) var errs []error for i := range machinesToDelete { m := machinesToDelete[i] @@ -513,24 +570,24 @@ func (r *KubeadmControlPlaneReconciler) reconcileDelete(ctx context.Context, clu } if len(errs) > 0 { err := kerrors.NewAggregate(errs) - r.recorder.Eventf(kcp, corev1.EventTypeWarning, "FailedDelete", - "Failed to delete control plane Machines for cluster %s/%s control plane: %v", cluster.Namespace, cluster.Name, err) + r.recorder.Eventf(controlPlane.KCP, corev1.EventTypeWarning, "FailedDelete", + "Failed to delete control plane Machines for cluster %s control plane: %v", klog.KObj(controlPlane.Cluster), err) return ctrl.Result{}, err } - conditions.MarkFalse(kcp, controlplanev1.ResizedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(controlPlane.KCP, controlplanev1.ResizedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") return ctrl.Result{RequeueAfter: deleteRequeueAfter}, nil } // ClusterToKubeadmControlPlane is a handler.ToRequestsFunc to be used to enqueue requests for reconciliation // for KubeadmControlPlane based on updates to a Cluster. -func (r *KubeadmControlPlaneReconciler) ClusterToKubeadmControlPlane(o client.Object) []ctrl.Request { +func (r *KubeadmControlPlaneReconciler) ClusterToKubeadmControlPlane(_ context.Context, o client.Object) []ctrl.Request { c, ok := o.(*clusterv1.Cluster) if !ok { panic(fmt.Sprintf("Expected a Cluster but got a %T", o)) } controlPlaneRef := c.Spec.ControlPlaneRef - if controlPlaneRef != nil && controlPlaneRef.Kind == "KubeadmControlPlane" { + if controlPlaneRef != nil && controlPlaneRef.Kind == kubeadmControlPlaneKind { return []ctrl.Request{{NamespacedName: client.ObjectKey{Namespace: controlPlaneRef.Namespace, Name: controlPlaneRef.Name}}} } @@ -576,7 +633,7 @@ func (r *KubeadmControlPlaneReconciler) syncMachines(ctx context.Context, contro // TODO: This should be cleaned-up to have a more streamline way of constructing and using patchHelpers. 
patchHelper, err := patch.NewHelper(updatedMachine, r.Client) if err != nil { - return errors.Wrapf(err, "failed to create patch helper for Machine %s", klog.KObj(updatedMachine)) + return err } patchHelpers[machineName] = patchHelper @@ -584,32 +641,40 @@ func (r *KubeadmControlPlaneReconciler) syncMachines(ctx context.Context, contro {"f:metadata", "f:annotations"}, {"f:metadata", "f:labels"}, } - infraMachine := controlPlane.InfraResources[machineName] - // Cleanup managed fields of all InfrastructureMachines to drop ownership of labels and annotations - // from "manager". We do this so that InfrastructureMachines that are created using the Create method - // can also work with SSA. Otherwise, labels and annotations would be co-owned by our "old" "manager" - // and "capi-kubeadmcontrolplane" and then we would not be able to e.g. drop labels and annotations. - if err := ssa.DropManagedFields(ctx, r.Client, infraMachine, kcpManagerName, labelsAndAnnotationsManagedFieldPaths); err != nil { - return errors.Wrapf(err, "failed to clean up managedFields of InfrastructureMachine %s", klog.KObj(infraMachine)) - } - // Update in-place mutating fields on InfrastructureMachine. - if err := r.updateExternalObject(ctx, infraMachine, controlPlane.KCP, controlPlane.Cluster); err != nil { - return errors.Wrapf(err, "failed to update InfrastructureMachine %s", klog.KObj(infraMachine)) + infraMachine, infraMachineFound := controlPlane.InfraResources[machineName] + // Only update the InfraMachine if it is already found, otherwise just skip it. + // This could happen e.g. if the cache is not up-to-date yet. + if infraMachineFound { + // Cleanup managed fields of all InfrastructureMachines to drop ownership of labels and annotations + // from "manager". We do this so that InfrastructureMachines that are created using the Create method + // can also work with SSA. Otherwise, labels and annotations would be co-owned by our "old" "manager" + // and "capi-kubeadmcontrolplane" and then we would not be able to e.g. drop labels and annotations. + if err := ssa.DropManagedFields(ctx, r.Client, infraMachine, kcpManagerName, labelsAndAnnotationsManagedFieldPaths); err != nil { + return errors.Wrapf(err, "failed to clean up managedFields of InfrastructureMachine %s", klog.KObj(infraMachine)) + } + // Update in-place mutating fields on InfrastructureMachine. + if err := r.updateExternalObject(ctx, infraMachine, controlPlane.KCP, controlPlane.Cluster); err != nil { + return errors.Wrapf(err, "failed to update InfrastructureMachine %s", klog.KObj(infraMachine)) + } } - kubeadmConfig := controlPlane.KubeadmConfigs[machineName] - // Note: Set the GroupVersionKind because updateExternalObject depends on it. - kubeadmConfig.SetGroupVersionKind(m.Spec.Bootstrap.ConfigRef.GroupVersionKind()) - // Cleanup managed fields of all KubeadmConfigs to drop ownership of labels and annotations - // from "manager". We do this so that KubeadmConfigs that are created using the Create method - // can also work with SSA. Otherwise, labels and annotations would be co-owned by our "old" "manager" - // and "capi-kubeadmcontrolplane" and then we would not be able to e.g. drop labels and annotations. - if err := ssa.DropManagedFields(ctx, r.Client, kubeadmConfig, kcpManagerName, labelsAndAnnotationsManagedFieldPaths); err != nil { - return errors.Wrapf(err, "failed to clean up managedFields of KubeadmConfig %s", klog.KObj(kubeadmConfig)) - } - // Update in-place mutating fields on BootstrapConfig. 
- if err := r.updateExternalObject(ctx, kubeadmConfig, controlPlane.KCP, controlPlane.Cluster); err != nil { - return errors.Wrapf(err, "failed to update KubeadmConfig %s", klog.KObj(kubeadmConfig)) + kubeadmConfig, kubeadmConfigFound := controlPlane.KubeadmConfigs[machineName] + // Only update the KubeadmConfig if it is already found, otherwise just skip it. + // This could happen e.g. if the cache is not up-to-date yet. + if kubeadmConfigFound { + // Note: Set the GroupVersionKind because updateExternalObject depends on it. + kubeadmConfig.SetGroupVersionKind(m.Spec.Bootstrap.ConfigRef.GroupVersionKind()) + // Cleanup managed fields of all KubeadmConfigs to drop ownership of labels and annotations + // from "manager". We do this so that KubeadmConfigs that are created using the Create method + // can also work with SSA. Otherwise, labels and annotations would be co-owned by our "old" "manager" + // and "capi-kubeadmcontrolplane" and then we would not be able to e.g. drop labels and annotations. + if err := ssa.DropManagedFields(ctx, r.Client, kubeadmConfig, kcpManagerName, labelsAndAnnotationsManagedFieldPaths); err != nil { + return errors.Wrapf(err, "failed to clean up managedFields of KubeadmConfig %s", klog.KObj(kubeadmConfig)) + } + // Update in-place mutating fields on BootstrapConfig. + if err := r.updateExternalObject(ctx, kubeadmConfig, controlPlane.KCP, controlPlane.Cluster); err != nil { + return errors.Wrapf(err, "failed to update KubeadmConfig %s", klog.KObj(kubeadmConfig)) + } } } // Update the patch helpers. @@ -619,16 +684,16 @@ func (r *KubeadmControlPlaneReconciler) syncMachines(ctx context.Context, contro // reconcileControlPlaneConditions is responsible for reconciling conditions reporting the status of static pods and // the status of the etcd cluster. -func (r *KubeadmControlPlaneReconciler) reconcileControlPlaneConditions(ctx context.Context, controlPlane *internal.ControlPlane) (ctrl.Result, error) { +func (r *KubeadmControlPlaneReconciler) reconcileControlPlaneConditions(ctx context.Context, controlPlane *internal.ControlPlane) error { // If the cluster is not yet initialized, there is no way to connect to the workload cluster and fetch information // for updating conditions. Return early. if !controlPlane.KCP.Status.Initialized { - return ctrl.Result{}, nil + return nil } - workloadCluster, err := r.managementCluster.GetWorkloadCluster(ctx, util.ObjectKey(controlPlane.Cluster)) + workloadCluster, err := controlPlane.GetWorkloadCluster(ctx) if err != nil { - return ctrl.Result{}, errors.Wrap(err, "cannot get remote client to workload cluster") + return errors.Wrap(err, "cannot get remote client to workload cluster") } // Update conditions status @@ -637,28 +702,28 @@ func (r *KubeadmControlPlaneReconciler) reconcileControlPlaneConditions(ctx cont // Patch machines with the updated conditions. if err := controlPlane.PatchMachines(ctx); err != nil { - return ctrl.Result{}, err + return err } // KCP will be patched at the end of Reconcile to reflect updated conditions, so we can return now. - return ctrl.Result{}, nil + return nil }
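A pattern worth noting in this refactor: helpers such as reconcileControlPlaneConditions and reconcileEtcdMembers now return a bare error, leaving all requeue decisions to the top-level Reconcile. The compact sketch below illustrates that division of responsibilities; checkEtcd is a hypothetical stand-in helper, not the real function:

package main

import (
	"errors"
	"fmt"
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
)

// checkEtcd stands in for helpers like reconcileEtcdMembers after the
// refactor: it reports problems as errors and never smuggles requeue
// hints inside a ctrl.Result.
func checkEtcd(healthy bool) error {
	if !healthy {
		return errors.New("etcd members out of sync with machines")
	}
	return nil
}

// reconcile owns the requeue policy: errors bubble up for backoff, while the
// "poll again soon" decision is made in exactly one place.
func reconcile(etcdHealthy, kcpReady bool) (ctrl.Result, error) {
	if err := checkEtcd(etcdHealthy); err != nil {
		return ctrl.Result{}, err
	}
	if !kcpReady {
		return ctrl.Result{RequeueAfter: 20 * time.Second}, nil
	}
	return ctrl.Result{}, nil
}

func main() {
	res, err := reconcile(true, false)
	fmt.Println(res.IsZero(), res.RequeueAfter, err) // false 20s <nil>
}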
// reconcileEtcdMembers ensures the number of etcd members is in sync with the number of machines/nodes. // This is usually required after a machine deletion. // // NOTE: this func uses KCP conditions; it is required to call reconcileControlPlaneConditions before this. -func (r *KubeadmControlPlaneReconciler) reconcileEtcdMembers(ctx context.Context, controlPlane *internal.ControlPlane) (ctrl.Result, error) { +func (r *KubeadmControlPlaneReconciler) reconcileEtcdMembers(ctx context.Context, controlPlane *internal.ControlPlane) error { log := ctrl.LoggerFrom(ctx) // If etcd is not managed by KCP this is a no-op. if !controlPlane.IsEtcdManaged() { - return ctrl.Result{}, nil + return nil } // If there are no KCP-owned control-plane machines, then the control-plane has not been initialized yet. if controlPlane.Machines.Len() == 0 { - return ctrl.Result{}, nil + return nil } // Collect all the node names. @@ -666,7 +731,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileEtcdMembers(ctx context.Context for _, machine := range controlPlane.Machines { if machine.Status.NodeRef == nil { // If there are provisioning machines (machines without a node yet), return. - return ctrl.Result{}, nil + return nil } nodeNames = append(nodeNames, machine.Status.NodeRef.Name) } @@ -674,51 +739,51 @@ func (r *KubeadmControlPlaneReconciler) reconcileEtcdMembers(ctx context.Context // Potential inconsistencies between the list of members and the list of machines/nodes are // surfaced using the EtcdClusterHealthyCondition; if this condition is true, meaning no inconsistencies exist, return early. if conditions.IsTrue(controlPlane.KCP, controlplanev1.EtcdClusterHealthyCondition) { - return ctrl.Result{}, nil + return nil } - workloadCluster, err := r.managementCluster.GetWorkloadCluster(ctx, util.ObjectKey(controlPlane.Cluster)) + workloadCluster, err := controlPlane.GetWorkloadCluster(ctx) if err != nil { // Failing at connecting to the workload cluster can mean workload cluster is unhealthy for a variety of reasons such as etcd quorum loss. - return ctrl.Result{}, errors.Wrap(err, "cannot get remote client to workload cluster") + return errors.Wrap(err, "cannot get remote client to workload cluster") } parsedVersion, err := semver.ParseTolerant(controlPlane.KCP.Spec.Version) if err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to parse kubernetes version %q", controlPlane.KCP.Spec.Version) + return errors.Wrapf(err, "failed to parse kubernetes version %q", controlPlane.KCP.Spec.Version) } removedMembers, err := workloadCluster.ReconcileEtcdMembers(ctx, nodeNames, parsedVersion) if err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed attempt to reconcile etcd members") + return errors.Wrap(err, "failed attempt to reconcile etcd members") } if len(removedMembers) > 0 { log.Info("Etcd members without nodes removed from the cluster", "members", removedMembers) } - return ctrl.Result{}, nil + return nil } -func (r *KubeadmControlPlaneReconciler) reconcileCertificateExpiries(ctx context.Context, controlPlane *internal.ControlPlane) (ctrl.Result, error) { +func (r *KubeadmControlPlaneReconciler) reconcileCertificateExpiries(ctx context.Context, controlPlane *internal.ControlPlane) error { log := ctrl.LoggerFrom(ctx) // Return if there are no KCP-owned control-plane machines. if controlPlane.Machines.Len() == 0 { - return ctrl.Result{}, nil + return nil } // Return if KCP is not yet initialized (no API server to contact for checking certificate expiration). if !controlPlane.KCP.Status.Initialized { - return ctrl.Result{}, nil + return nil } // Ignore machines which are being deleted.
machines := controlPlane.Machines.Filter(collections.Not(collections.HasDeletionTimestamp)) - workloadCluster, err := r.managementCluster.GetWorkloadCluster(ctx, util.ObjectKey(controlPlane.Cluster)) + workloadCluster, err := controlPlane.GetWorkloadCluster(ctx) if err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to reconcile certificate expiries: cannot get remote client to workload cluster") + return errors.Wrap(err, "failed to reconcile certificate expiries: cannot get remote client to workload cluster") } for _, m := range machines { @@ -746,14 +811,14 @@ func (r *KubeadmControlPlaneReconciler) reconcileCertificateExpiries(ctx context log.V(3).Info("Reconciling certificate expiry") certificateExpiry, err := workloadCluster.GetAPIServerCertificateExpiry(ctx, kubeadmConfig, nodeName) if err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to reconcile certificate expiry for Machine/%s", m.Name) + return errors.Wrapf(err, "failed to reconcile certificate expiry for Machine/%s", m.Name) } expiry := certificateExpiry.Format(time.RFC3339) log.V(2).Info(fmt.Sprintf("Setting certificate expiry to %s", expiry)) patchHelper, err := patch.NewHelper(kubeadmConfig, r.Client) if err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to reconcile certificate expiry for Machine/%s: failed to create PatchHelper for KubeadmConfig/%s", m.Name, kubeadmConfig.Name) + return errors.Wrapf(err, "failed to reconcile certificate expiry for Machine/%s", m.Name) } if annotations == nil { @@ -763,11 +828,11 @@ func (r *KubeadmControlPlaneReconciler) reconcileCertificateExpiries(ctx context kubeadmConfig.SetAnnotations(annotations) if err := patchHelper.Patch(ctx, kubeadmConfig); err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to reconcile certificate expiry for Machine/%s: failed to patch KubeadmConfig/%s", m.Name, kubeadmConfig.Name) + return errors.Wrapf(err, "failed to reconcile certificate expiry for Machine/%s", m.Name) } } - return ctrl.Result{}, nil + return nil } func (r *KubeadmControlPlaneReconciler) adoptMachines(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, machines collections.Machines, cluster *clusterv1.Cluster) error { @@ -870,8 +935,8 @@ func (r *KubeadmControlPlaneReconciler) adoptOwnedSecrets(ctx context.Context, k Kind: "KubeadmControlPlane", Name: kcp.Name, UID: kcp.UID, - Controller: pointer.Bool(true), - BlockOwnerDeletion: pointer.Bool(true), + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), })) if err := r.Client.Update(ctx, ss); err != nil { @@ -883,30 +948,36 @@ func (r *KubeadmControlPlaneReconciler) adoptOwnedSecrets(ctx context.Context, k } // ensureCertificatesOwnerRef ensures an ownerReference to the owner is added on the Secrets holding certificates. 
-func ensureCertificatesOwnerRef(ctx context.Context, ctrlclient client.Client, clusterKey client.ObjectKey, certificates secret.Certificates, owner metav1.OwnerReference) error { +func (r *KubeadmControlPlaneReconciler) ensureCertificatesOwnerRef(ctx context.Context, certificates secret.Certificates, owner metav1.OwnerReference) error { for _, c := range certificates { - s := &corev1.Secret{} - secretKey := client.ObjectKey{Namespace: clusterKey.Namespace, Name: secret.Name(clusterKey.Name, c.Purpose)} - if err := ctrlclient.Get(ctx, secretKey, s); err != nil { - return errors.Wrapf(err, "failed to get Secret %s", secretKey) - } - // If the Type doesn't match the type used for secrets created by core components, KCP included - if s.Type != clusterv1.ClusterSecretType { + if c.Secret == nil { continue } - patchHelper, err := patch.NewHelper(s, ctrlclient) + + patchHelper, err := patch.NewHelper(c.Secret, r.Client) if err != nil { - return errors.Wrapf(err, "failed to create patchHelper for Secret %s", secretKey) + return err } - // Remove the current controller if one exists. - if controller := metav1.GetControllerOf(s); controller != nil { - s.SetOwnerReferences(util.RemoveOwnerRef(s.OwnerReferences, *controller)) + controller := metav1.GetControllerOf(c.Secret) + // If the current controller is KCP, ensure the owner reference is up to date. + // Note: This ensures secrets created prior to v1alpha4 are updated to have the correct owner reference apiVersion. + if controller != nil && controller.Kind == kubeadmControlPlaneKind { + c.Secret.SetOwnerReferences(util.EnsureOwnerRef(c.Secret.GetOwnerReferences(), owner)) } - s.OwnerReferences = util.EnsureOwnerRef(s.OwnerReferences, owner) - if err := patchHelper.Patch(ctx, s); err != nil { - return errors.Wrapf(err, "failed to patch Secret %s with ownerReference %s", secretKey, owner.String()) + // If the Type doesn't match the type used for secrets created by core components, continue without altering the owner reference further. + // Note: This ensures that control plane related secrets created by KubeadmConfig are eventually owned by KCP. + // TODO: Remove this logic once standalone control plane machines are no longer allowed. + if c.Secret.Type == clusterv1.ClusterSecretType { + // Remove the current controller if one exists. + if controller != nil { + c.Secret.SetOwnerReferences(util.RemoveOwnerRef(c.Secret.GetOwnerReferences(), *controller)) + } + c.Secret.SetOwnerReferences(util.EnsureOwnerRef(c.Secret.GetOwnerReferences(), owner)) + } + if err := patchHelper.Patch(ctx, c.Secret); err != nil { + return errors.Wrapf(err, "failed to set ownerReference") + } } return nil diff --git a/controlplane/kubeadm/internal/controllers/controller_test.go b/controlplane/kubeadm/internal/controllers/controller_test.go index 10e1a224caff..6459174a918a 100644 --- a/controlplane/kubeadm/internal/controllers/controller_test.go +++ b/controlplane/kubeadm/internal/controllers/controller_test.go @@ -28,7 +28,7 @@ import ( "testing" "time" - "github.com/blang/semver" + "github.com/blang/semver/v4" .
"github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -37,24 +37,24 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/record" - "k8s.io/klog/v2/klogr" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/log" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/external" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" + controlplanev1webhooks "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/webhooks" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/internal/test/builder" "sigs.k8s.io/cluster-api/internal/util/ssa" + "sigs.k8s.io/cluster-api/internal/webhooks" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/certs" "sigs.k8s.io/cluster-api/util/collections" @@ -87,12 +87,13 @@ func TestClusterToKubeadmControlPlane(t *testing.T) { } r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), } - got := r.ClusterToKubeadmControlPlane(cluster) - g.Expect(got).To(Equal(expectedResult)) + got := r.ClusterToKubeadmControlPlane(ctx, cluster) + g.Expect(got).To(BeComparableTo(expectedResult)) } func TestClusterToKubeadmControlPlaneNoControlPlane(t *testing.T) { @@ -102,11 +103,12 @@ func TestClusterToKubeadmControlPlaneNoControlPlane(t *testing.T) { cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: metav1.NamespaceDefault}) r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), } - got := r.ClusterToKubeadmControlPlane(cluster) + got := r.ClusterToKubeadmControlPlane(ctx, cluster) g.Expect(got).To(BeNil()) } @@ -125,11 +127,12 @@ func TestClusterToKubeadmControlPlaneOtherControlPlane(t *testing.T) { } r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), } - got := r.ClusterToKubeadmControlPlane(cluster) + got := r.ClusterToKubeadmControlPlane(ctx, cluster) g.Expect(got).To(BeNil()) } @@ -147,13 +150,14 @@ func TestReconcileReturnErrorWhenOwnerClusterIsMissing(t *testing.T) { }(kcp, ns) r := &KubeadmControlPlaneReconciler{ - Client: env, - recorder: record.NewFakeRecorder(32), + Client: env, + SecretCachingClient: secretCachingClient, + recorder: record.NewFakeRecorder(32), } result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(result).To(Equal(ctrl.Result{})) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(result).To(BeComparableTo(ctrl.Result{})) // calling reconcile should return error g.Expect(env.CleanupAndWait(ctx, cluster)).To(Succeed()) @@ -169,9 +173,10 @@ func TestReconcileUpdateObservedGeneration(t *testing.T) { g := NewWithT(t) r := &KubeadmControlPlaneReconciler{ - Client: env, 
- recorder: record.NewFakeRecorder(32), - managementCluster: &internal.Management{Client: env.Client, Tracker: nil}, + Client: env, + SecretCachingClient: secretCachingClient, + recorder: record.NewFakeRecorder(32), + managementCluster: &internal.Management{Client: env.Client, Tracker: nil}, } ns, err := env.CreateNamespace(ctx, "test-reconcile-upd-og") @@ -186,7 +191,7 @@ func TestReconcileUpdateObservedGeneration(t *testing.T) { // read kcp.Generation after create errGettingObject := env.Get(ctx, util.ObjectKey(kcp), kcp) - g.Expect(errGettingObject).NotTo(HaveOccurred()) + g.Expect(errGettingObject).ToNot(HaveOccurred()) generation := kcp.Generation // Set cluster.status.InfrastructureReady so we actually enter in the reconcile loop @@ -195,22 +200,22 @@ func TestReconcileUpdateObservedGeneration(t *testing.T) { // call reconcile the first time, so we can check if observedGeneration is set when adding a finalizer result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(result).To(Equal(ctrl.Result{})) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(result).To(BeComparableTo(ctrl.Result{})) g.Eventually(func() int64 { errGettingObject = env.Get(ctx, util.ObjectKey(kcp), kcp) - g.Expect(errGettingObject).NotTo(HaveOccurred()) + g.Expect(errGettingObject).ToNot(HaveOccurred()) return kcp.Status.ObservedGeneration }, 10*time.Second).Should(Equal(generation)) // triggers a generation change by changing the spec - kcp.Spec.Replicas = pointer.Int32(*kcp.Spec.Replicas + 2) + kcp.Spec.Replicas = ptr.To[int32](*kcp.Spec.Replicas + 2) g.Expect(env.Update(ctx, kcp)).To(Succeed()) // read kcp.Generation after the update errGettingObject = env.Get(ctx, util.ObjectKey(kcp), kcp) - g.Expect(errGettingObject).NotTo(HaveOccurred()) + g.Expect(errGettingObject).ToNot(HaveOccurred()) generation = kcp.Generation // call reconcile the second time, so we can check if observedGeneration is set when calling defer patch @@ -220,7 +225,7 @@ func TestReconcileUpdateObservedGeneration(t *testing.T) { g.Eventually(func() int64 { errGettingObject = env.Get(ctx, util.ObjectKey(kcp), kcp) - g.Expect(errGettingObject).NotTo(HaveOccurred()) + g.Expect(errGettingObject).ToNot(HaveOccurred()) return kcp.Status.ObservedGeneration }, 10*time.Second).Should(Equal(generation)) } @@ -245,18 +250,21 @@ func TestReconcileNoClusterOwnerRef(t *testing.T) { }, }, } - kcp.Default() - g.Expect(kcp.ValidateCreate()).To(Succeed()) + webhook := &controlplanev1webhooks.KubeadmControlPlane{} + g.Expect(webhook.Default(ctx, kcp)).To(Succeed()) + _, err := webhook.ValidateCreate(ctx, kcp) + g.Expect(err).ToNot(HaveOccurred()) fakeClient := newFakeClient(kcp.DeepCopy()) r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), } result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(result).To(Equal(ctrl.Result{})) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(result).To(BeComparableTo(ctrl.Result{})) machineList := &clusterv1.MachineList{} g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(metav1.NamespaceDefault))).To(Succeed()) @@ -286,12 +294,13 @@ func TestReconcileNoKCP(t *testing.T) { fakeClient := newFakeClient() r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + 
SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), } _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) } func TestReconcileNoCluster(t *testing.T) { @@ -321,16 +330,19 @@ func TestReconcileNoCluster(t *testing.T) { }, }, } - kcp.Default() - g.Expect(kcp.ValidateCreate()).To(Succeed()) + webhook := &controlplanev1webhooks.KubeadmControlPlane{} + g.Expect(webhook.Default(ctx, kcp)).To(Succeed()) + _, err := webhook.ValidateCreate(ctx, kcp) + g.Expect(err).ToNot(HaveOccurred()) fakeClient := newFakeClient(kcp.DeepCopy()) r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), } - _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) + _, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) g.Expect(err).To(HaveOccurred()) machineList := &clusterv1.MachineList{} @@ -370,16 +382,19 @@ func TestReconcilePaused(t *testing.T) { }, }, } - kcp.Default() - g.Expect(kcp.ValidateCreate()).To(Succeed()) + webhook := &controlplanev1webhooks.KubeadmControlPlane{} + g.Expect(webhook.Default(ctx, kcp)).To(Succeed()) + _, err := webhook.ValidateCreate(ctx, kcp) + g.Expect(err).ToNot(HaveOccurred()) fakeClient := newFakeClient(kcp.DeepCopy(), cluster.DeepCopy()) r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), } - _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) - g.Expect(err).NotTo(HaveOccurred()) + _, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) + g.Expect(err).ToNot(HaveOccurred()) machineList := &clusterv1.MachineList{} g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(metav1.NamespaceDefault))).To(Succeed()) @@ -390,7 +405,7 @@ func TestReconcilePaused(t *testing.T) { kcp.ObjectMeta.Annotations = map[string]string{} kcp.ObjectMeta.Annotations[clusterv1.PausedAnnotation] = "paused" _, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) } func TestReconcileClusterNoEndpoints(t *testing.T) { @@ -423,13 +438,16 @@ func TestReconcileClusterNoEndpoints(t *testing.T) { }, }, } - kcp.Default() - g.Expect(kcp.ValidateCreate()).To(Succeed()) + webhook := &controlplanev1webhooks.KubeadmControlPlane{} + g.Expect(webhook.Default(ctx, kcp)).To(Succeed()) + _, err := webhook.ValidateCreate(ctx, kcp) + g.Expect(err).ToNot(HaveOccurred()) fakeClient := newFakeClient(kcp.DeepCopy(), cluster.DeepCopy()) r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), managementCluster: &fakeManagementCluster{ Management: &internal.Management{Client: fakeClient}, Workload: fakeWorkloadCluster{}, @@ -437,16 +455,16 @@ func TestReconcileClusterNoEndpoints(t *testing.T) { } result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) // this first requeue is to add finalizer - g.Expect(result).To(Equal(ctrl.Result{})) + g.Expect(result).To(BeComparableTo(ctrl.Result{})) 
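Note on the recurring test change above: the deprecated kcp.Default() and kcp.ValidateCreate() methods are replaced by calls on a dedicated webhook object from controlplane/kubeadm/internal/webhooks. A minimal sketch of the controller-runtime pattern being adopted, assuming the v0.15+ CustomDefaulter/CustomValidator interfaces; this is an illustration, not the actual KCP webhook implementation:

// Sketch: the webhook shape these tests exercise.
package webhooks

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

type KubeadmControlPlane struct{}

var _ webhook.CustomDefaulter = &KubeadmControlPlane{}
var _ webhook.CustomValidator = &KubeadmControlPlane{}

// Default mutates obj in place, which is why the tests can call
// webhook.Default(ctx, kcp) directly and then assert on kcp.
func (w *KubeadmControlPlane) Default(_ context.Context, _ runtime.Object) error {
	return nil
}

// ValidateCreate returns admission warnings in addition to an error,
// hence the two-value form used above: _, err := webhook.ValidateCreate(ctx, kcp).
func (w *KubeadmControlPlane) ValidateCreate(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
	return nil, nil
}

func (w *KubeadmControlPlane) ValidateUpdate(_ context.Context, _, _ runtime.Object) (admission.Warnings, error) {
	return nil, nil
}

func (w *KubeadmControlPlane) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
	return nil, nil
}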
g.Expect(r.Client.Get(ctx, util.ObjectKey(kcp), kcp)).To(Succeed()) g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer)) result, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) // TODO: this should stop to re-queue as soon as we have a proper remote cluster cache in place. - g.Expect(result).To(Equal(ctrl.Result{Requeue: false, RequeueAfter: 20 * time.Second})) + g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: false, RequeueAfter: 20 * time.Second})) g.Expect(r.Client.Get(ctx, util.ObjectKey(kcp), kcp)).To(Succeed()) // Always expect that the Finalizer is set on the passed in resource @@ -455,7 +473,7 @@ func TestReconcileClusterNoEndpoints(t *testing.T) { g.Expect(kcp.Status.Selector).NotTo(BeEmpty()) _, err = secret.GetFromNamespacedName(ctx, fakeClient, client.ObjectKey{Namespace: metav1.NamespaceDefault, Name: "foo"}, secret.ClusterCA) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) machineList := &clusterv1.MachineList{} g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(metav1.NamespaceDefault))).To(Succeed()) @@ -511,12 +529,14 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { fmc.Reader = fakeClient r := &KubeadmControlPlaneReconciler{ Client: fakeClient, - APIReader: fakeClient, + SecretCachingClient: fakeClient, managementCluster: fmc, managementClusterUncached: fmc, } - g.Expect(r.reconcile(ctx, cluster, kcp)).To(Equal(ctrl.Result{})) + _, adoptableMachineFound, err := r.initControlPlaneScope(ctx, cluster, kcp) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(adoptableMachineFound).To(BeTrue()) machineList := &clusterv1.MachineList{} g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed()) @@ -605,12 +625,14 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { fmc.Reader = fakeClient r := &KubeadmControlPlaneReconciler{ Client: fakeClient, - APIReader: fakeClient, + SecretCachingClient: fakeClient, managementCluster: fmc, managementClusterUncached: fmc, } - g.Expect(r.reconcile(ctx, cluster, kcp)).To(Equal(ctrl.Result{})) + _, adoptableMachineFound, err := r.initControlPlaneScope(ctx, cluster, kcp) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(adoptableMachineFound).To(BeTrue()) machineList := &clusterv1.MachineList{} g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed()) @@ -633,7 +655,7 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { }) t.Run("Deleted KubeadmControlPlanes don't adopt machines", func(t *testing.T) { - // Usually we won't get into the inner reconcile with a deleted control plane, but it's possible when deleting with "oprhanDependents": + // Usually we won't get into the inner reconcile with a deleted control plane, but it's possible when deleting with "orphanDependents": // 1. The deletion timestamp is set in the API server, but our cache has not yet updated // 2. The garbage collector removes our ownership reference from a Machine, triggering a re-reconcile (or we get unlucky with the periodic reconciliation) // 3. We get into the inner reconcile function and re-adopt the Machine @@ -648,6 +670,9 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { now := metav1.Now() kcp.DeletionTimestamp = &now + // We also have to set a finalizer as fake client doesn't accept objects + // with a deletionTimestamp without a finalizer. 
+ kcp.Finalizers = []string{"block-deletion"} fmc := &fakeManagementCluster{ Machines: collections.Machines{}, @@ -686,15 +711,14 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { fmc.Reader = fakeClient r := &KubeadmControlPlaneReconciler{ Client: fakeClient, - APIReader: fakeClient, + SecretCachingClient: fakeClient, managementCluster: fmc, managementClusterUncached: fmc, } - result, err := r.reconcile(ctx, cluster, kcp) - g.Expect(result).To(Equal(ctrl.Result{})) - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(ContainSubstring("has just been deleted")) + _, adoptableMachineFound, err := r.initControlPlaneScope(ctx, cluster, kcp) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(adoptableMachineFound).To(BeFalse()) machineList := &clusterv1.MachineList{} g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed()) @@ -704,7 +728,7 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { } }) - t.Run("refuses to adopt Machines that are more than one version old", func(t *testing.T) { + t.Run("Do not adopt Machines that are more than one version old", func(t *testing.T) { g := NewWithT(t) cluster, kcp, tmpl := createClusterWithControlPlane(metav1.NamespaceDefault) @@ -728,7 +752,7 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { Kind: "KubeadmConfig", }, }, - Version: pointer.String("v1.15.0"), + Version: ptr.To("v1.15.0"), }, }, }, @@ -740,13 +764,16 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { recorder := record.NewFakeRecorder(32) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, - APIReader: fakeClient, + SecretCachingClient: fakeClient, recorder: recorder, managementCluster: fmc, managementClusterUncached: fmc, } - g.Expect(r.reconcile(ctx, cluster, kcp)).To(Equal(ctrl.Result{})) + _, adoptableMachineFound, err := r.initControlPlaneScope(ctx, cluster, kcp) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(adoptableMachineFound).To(BeTrue()) + // Message: Warning AdoptionFailed Could not adopt Machine test/test0: its version ("v1.15.0") is outside supported +/- one minor version skew from KCP's ("v1.17.0") g.Expect(recorder.Events).To(Receive(ContainSubstring("minor version"))) @@ -768,9 +795,9 @@ func TestKubeadmControlPlaneReconciler_ensureOwnerReferences(t *testing.T) { cluster.Status.InfrastructureReady = true kcp.Spec.Version = "v1.21.0" key, err := certs.NewPrivateKey() - g.Expect(err).To(BeNil()) + g.Expect(err).ToNot(HaveOccurred()) crt, err := getTestCACert(key) - g.Expect(err).To(BeNil()) + g.Expect(err).ToNot(HaveOccurred()) clusterSecret := &corev1.Secret{ // The Secret's Type is used by KCP to determine whether it is user-provided. @@ -791,7 +818,7 @@ func TestKubeadmControlPlaneReconciler_ensureOwnerReferences(t *testing.T) { kcpOwner := *metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane")) - t.Run("add KCP owner for secrets with no controller reference", func(t *testing.T) { + t.Run("add KCP owner for secrets with no controller reference", func(*testing.T) { objs := []client.Object{builder.GenericInfrastructureMachineTemplateCRD, cluster.DeepCopy(), kcp.DeepCopy(), tmpl.DeepCopy()} certificates := secret.Certificates{ {Purpose: secret.ClusterCA}, @@ -806,13 +833,20 @@ func TestKubeadmControlPlaneReconciler_ensureOwnerReferences(t *testing.T) { // Set the Secret Type to clusterv1.ClusterSecretType which signals this Secret was generated by CAPI. 
s.Type = clusterv1.ClusterSecretType + // Store the secret in the certificate. + c.Secret = s + objs = append(objs, s) } fakeClient := newFakeClient(objs...) - err = ensureCertificatesOwnerRef(ctx, fakeClient, client.ObjectKeyFromObject(cluster), certificates, kcpOwner) - g.Expect(err).To(BeNil()) + r := KubeadmControlPlaneReconciler{ + Client: fakeClient, + SecretCachingClient: fakeClient, + } + err = r.ensureCertificatesOwnerRef(ctx, certificates, kcpOwner) + g.Expect(err).ToNot(HaveOccurred()) secrets := &corev1.SecretList{} g.Expect(fakeClient.List(ctx, secrets, client.InNamespace(cluster.Namespace), client.MatchingLabels{"testing": "yes"})).To(Succeed()) @@ -821,7 +855,7 @@ } }) - t.Run("replace non-KCP controller with KCP controller reference", func(t *testing.T) { + t.Run("replace non-KCP controller with KCP controller reference", func(*testing.T) { objs := []client.Object{builder.GenericInfrastructureMachineTemplateCRD, cluster.DeepCopy(), kcp.DeepCopy(), tmpl.DeepCopy()} certificates := secret.Certificates{ {Purpose: secret.ClusterCA}, @@ -844,15 +878,24 @@ Kind: "OtherController", Name: "name", UID: "uid", - Controller: pointer.Bool(true), + Controller: ptr.To(true), }, }) + + // Store the secret in the certificate. + c.Secret = s + objs = append(objs, s) } fakeClient := newFakeClient(objs...) - err := ensureCertificatesOwnerRef(ctx, fakeClient, client.ObjectKeyFromObject(cluster), certificates, kcpOwner) - g.Expect(err).To(BeNil()) + + r := KubeadmControlPlaneReconciler{ + Client: fakeClient, + SecretCachingClient: fakeClient, + } + err := r.ensureCertificatesOwnerRef(ctx, certificates, kcpOwner) + g.Expect(err).ToNot(HaveOccurred()) secrets := &corev1.SecretList{} g.Expect(fakeClient.List(ctx, secrets, client.InNamespace(cluster.Namespace), client.MatchingLabels{"testing": "yes"})).To(Succeed()) @@ -878,24 +921,32 @@ // Set the Secret Type to any type which signals this Secret is user-provided. s.Type = corev1.SecretTypeOpaque // Set a controller owner reference of an unknown type on the secret. - s.SetOwnerReferences([]metav1.OwnerReference{ - { + s.SetOwnerReferences(util.EnsureOwnerRef(s.GetOwnerReferences(), + metav1.OwnerReference{ APIVersion: bootstrapv1.GroupVersion.String(), // This owner reference to a different controller should be preserved. Kind: "OtherController", Name: kcp.Name, UID: kcp.UID, - Controller: pointer.Bool(true), - BlockOwnerDeletion: pointer.Bool(true), + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), }, - }) + )) + + // Store the secret in the certificate. + c.Secret = s objs = append(objs, s) } fakeClient := newFakeClient(objs...)
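These subtests now merge owner references instead of overwriting the whole slice, and ensureCertificatesOwnerRef has become a reconciler method reading through SecretCachingClient. A hedged illustration of the sigs.k8s.io/cluster-api/util helpers the assertions depend on; it assumes EnsureOwnerRef adds or refreshes a matching reference and RemoveOwnerRef drops one, with Controller flags omitted for brevity:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"sigs.k8s.io/cluster-api/util"
)

func main() {
	s := &corev1.Secret{}

	other := metav1.OwnerReference{
		APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
		Kind:       "OtherController",
		Name:       "name",
		UID:        "uid",
	}
	kcpOwner := metav1.OwnerReference{
		APIVersion: "controlplane.cluster.x-k8s.io/v1beta1",
		Kind:       "KubeadmControlPlane",
		Name:       "kcp",
		UID:        "kcp-uid",
	}

	// Seed a foreign controller reference, as the second subtest does.
	s.SetOwnerReferences(util.EnsureOwnerRef(s.GetOwnerReferences(), other))

	// What ensureCertificatesOwnerRef effectively does for CAPI-generated
	// secrets: drop the foreign controller, ensure the KCP reference.
	s.SetOwnerReferences(util.RemoveOwnerRef(s.GetOwnerReferences(), other))
	s.SetOwnerReferences(util.EnsureOwnerRef(s.GetOwnerReferences(), kcpOwner))

	fmt.Println(s.GetOwnerReferences()) // only the KCP reference remains
}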
- err := ensureCertificatesOwnerRef(ctx, fakeClient, client.ObjectKeyFromObject(cluster), certificates, kcpOwner) - g.Expect(err).To(BeNil()) + + r := KubeadmControlPlaneReconciler{ + Client: fakeClient, + SecretCachingClient: fakeClient, + } + err := r.ensureCertificatesOwnerRef(ctx, certificates, kcpOwner) + g.Expect(err).ToNot(HaveOccurred()) secrets := &corev1.SecretList{} g.Expect(fakeClient.List(ctx, secrets, client.InNamespace(cluster.Namespace), client.MatchingLabels{"testing": "yes"})).To(Succeed()) @@ -1081,42 +1132,45 @@ func TestReconcileCertificateExpiries(t *testing.T) { machineWithoutNodeRefKubeadmConfig, ) - controlPlane, err := internal.NewControlPlane(ctx, fakeClient, cluster, kcp, ownedMachines) - g.Expect(err).ToNot(HaveOccurred()) + managementCluster := &fakeManagementCluster{ + Workload: fakeWorkloadCluster{ + APIServerCertificateExpiry: &detectedExpiry, + }, + } r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - managementCluster: &fakeManagementCluster{ - Workload: fakeWorkloadCluster{ - APIServerCertificateExpiry: &detectedExpiry, - }, - }, + Client: fakeClient, + SecretCachingClient: fakeClient, + managementCluster: managementCluster, } - _, err = r.reconcileCertificateExpiries(ctx, controlPlane) - g.Expect(err).NotTo(HaveOccurred()) + controlPlane, err := internal.NewControlPlane(ctx, managementCluster, fakeClient, cluster, kcp, ownedMachines) + g.Expect(err).ToNot(HaveOccurred()) + + err = r.reconcileCertificateExpiries(ctx, controlPlane) + g.Expect(err).ToNot(HaveOccurred()) // Verify machineWithoutExpiryAnnotation has detectedExpiry. actualKubeadmConfig := bootstrapv1.KubeadmConfig{} err = fakeClient.Get(ctx, client.ObjectKeyFromObject(machineWithoutExpiryAnnotationKubeadmConfig), &actualKubeadmConfig) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) actualExpiry := actualKubeadmConfig.Annotations[clusterv1.MachineCertificatesExpiryDateAnnotation] g.Expect(actualExpiry).To(Equal(detectedExpiry.Format(time.RFC3339))) // Verify machineWithExpiryAnnotation has still preExistingExpiry. err = fakeClient.Get(ctx, client.ObjectKeyFromObject(machineWithExpiryAnnotationKubeadmConfig), &actualKubeadmConfig) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) actualExpiry = actualKubeadmConfig.Annotations[clusterv1.MachineCertificatesExpiryDateAnnotation] g.Expect(actualExpiry).To(Equal(preExistingExpiry.Format(time.RFC3339))) // Verify machineWithDeletionTimestamp has still no expiry annotation. err = fakeClient.Get(ctx, client.ObjectKeyFromObject(machineWithDeletionTimestampKubeadmConfig), &actualKubeadmConfig) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(actualKubeadmConfig.Annotations).ToNot(ContainElement(clusterv1.MachineCertificatesExpiryDateAnnotation)) // Verify machineWithoutNodeRef has still no expiry annotation. 
err = fakeClient.Get(ctx, client.ObjectKeyFromObject(machineWithoutNodeRefKubeadmConfig), &actualKubeadmConfig) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(actualKubeadmConfig.Annotations).ToNot(ContainElement(clusterv1.MachineCertificatesExpiryDateAnnotation)) } @@ -1126,7 +1180,7 @@ func TestReconcileInitializeControlPlane(t *testing.T) { t.Log("Creating the namespace") ns, err := env.CreateNamespace(ctx, "test-kcp-reconcile-initializecontrolplane") - g.Expect(err).To(BeNil()) + g.Expect(err).ToNot(HaveOccurred()) return ns } @@ -1151,7 +1205,7 @@ func TestReconcileInitializeControlPlane(t *testing.T) { } g.Expect(env.Create(ctx, cluster)).To(Succeed()) patchHelper, err := patch.NewHelper(cluster, env) - g.Expect(err).To(BeNil()) + g.Expect(err).ToNot(HaveOccurred()) cluster.Status = clusterv1.ClusterStatus{InfrastructureReady: true} g.Expect(patchHelper.Patch(ctx, cluster)).To(Succeed()) @@ -1225,7 +1279,8 @@ dns: type: CoreDNS imageRepository: registry.k8s.io kind: ClusterConfiguration -kubernetesVersion: metav1.16.1`, +kubernetesVersion: metav1.16.1 +`, }, } g.Expect(env.Create(ctx, kubeadmCM)).To(Succeed()) @@ -1261,9 +1316,9 @@ kubernetesVersion: metav1.16.1`, expectedLabels := map[string]string{clusterv1.ClusterNameLabel: "foo"} r := &KubeadmControlPlaneReconciler{ - Client: env, - APIReader: env.GetAPIReader(), - recorder: record.NewFakeRecorder(32), + Client: env, + SecretCachingClient: secretCachingClient, + recorder: record.NewFakeRecorder(32), managementCluster: &fakeManagementCluster{ Management: &internal.Management{Client: env}, Workload: fakeWorkloadCluster{ @@ -1282,20 +1337,20 @@ kubernetesVersion: metav1.16.1`, Status: internal.ClusterStatus{}, }, }, - disableInPlacePropagation: true, + ssaCache: ssa.NewCache(), } result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) // this first requeue is to add finalizer - g.Expect(result).To(Equal(ctrl.Result{})) - g.Expect(r.APIReader.Get(ctx, util.ObjectKey(kcp), kcp)).To(Succeed()) + g.Expect(result).To(BeComparableTo(ctrl.Result{})) + g.Expect(env.GetAPIReader().Get(ctx, util.ObjectKey(kcp), kcp)).To(Succeed()) g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer)) g.Eventually(func(g Gomega) { _, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(r.APIReader.Get(ctx, client.ObjectKey{Name: kcp.Name, Namespace: kcp.Namespace}, kcp)).To(Succeed()) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKey{Name: kcp.Name, Namespace: kcp.Namespace}, kcp)).To(Succeed()) // Expect the referenced infrastructure template to have a Cluster Owner Reference. 
g.Expect(env.GetAPIReader().Get(ctx, util.ObjectKey(genericInfrastructureMachineTemplate), genericInfrastructureMachineTemplate)).To(Succeed()) g.Expect(genericInfrastructureMachineTemplate.GetOwnerReferences()).To(ContainElement(metav1.OwnerReference{ @@ -1313,13 +1368,13 @@ kubernetesVersion: metav1.16.1`, g.Expect(conditions.IsFalse(kcp, controlplanev1.AvailableCondition)).To(BeTrue()) s, err := secret.GetFromNamespacedName(ctx, env, client.ObjectKey{Namespace: cluster.Namespace, Name: "foo"}, secret.ClusterCA) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(s).NotTo(BeNil()) g.Expect(s.Data).NotTo(BeEmpty()) g.Expect(s.Labels).To(Equal(expectedLabels)) k, err := kubeconfig.FromSecret(ctx, env, util.ObjectKey(cluster)) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(k).NotTo(BeEmpty()) machineList := &clusterv1.MachineList{} @@ -1330,7 +1385,7 @@ kubernetesVersion: metav1.16.1`, g.Expect(machine.Name).To(HavePrefix(kcp.Name)) // Newly cloned infra objects should have the infraref annotation. infraObj, err := external.Get(ctx, r.Client, &machine.Spec.InfrastructureRef, machine.Spec.InfrastructureRef.Namespace) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromNameAnnotation, genericInfrastructureMachineTemplate.GetName())) g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromGroupKindAnnotation, genericInfrastructureMachineTemplate.GroupVersionKind().GroupKind().String())) }, 30*time.Second).Should(Succeed()) @@ -1342,7 +1397,7 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { t.Log("Creating the namespace") ns, err := env.CreateNamespace(ctx, "test-kcp-reconciler-sync-machines") - g.Expect(err).To(BeNil()) + g.Expect(err).ToNot(HaveOccurred()) t.Log("Creating the Cluster") cluster := &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Namespace: ns.Name, Name: "test-cluster"}} @@ -1441,7 +1496,7 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { g.Expect(env.Create(ctx, existingKubeadmConfig, client.FieldOwner("manager"))).To(Succeed()) // Existing Machine to validate in-place mutation - fd := pointer.String("fd1") + fd := ptr.To("fd1") inPlaceMutatingMachine := &clusterv1.Machine{ TypeMeta: metav1.TypeMeta{ Kind: "Machine", @@ -1467,9 +1522,9 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { ConfigRef: bootstrapRef, }, InfrastructureRef: *infraMachineRef, - Version: pointer.String("v1.25.3"), + Version: ptr.To("v1.25.3"), FailureDomain: fd, - ProviderID: pointer.String("provider-id"), + ProviderID: ptr.To("provider-id"), NodeDrainTimeout: duration5s, NodeVolumeDetachTimeout: duration5s, NodeDeletionTimeout: duration5s, @@ -1497,7 +1552,7 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { Namespace: namespace.Name, }, Bootstrap: clusterv1.Bootstrap{ - DataSecretName: pointer.String("machine-bootstrap-secret"), + DataSecretName: ptr.To("machine-bootstrap-secret"), }, }, } @@ -1512,6 +1567,32 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { return !deletingMachine.DeletionTimestamp.IsZero() }, 30*time.Second).Should(BeTrue()) + // Existing machine that has an InfrastructureRef which does not exist.
+ nilInfraMachineMachine := &clusterv1.Machine{ + TypeMeta: metav1.TypeMeta{ + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Machine", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "nil-infra-machine-machine", + Namespace: namespace.Name, + Labels: map[string]string{}, + Annotations: map[string]string{}, + Finalizers: []string{"testing-finalizer"}, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: testCluster.Name, + InfrastructureRef: corev1.ObjectReference{ + Namespace: namespace.Name, + }, + Bootstrap: clusterv1.Bootstrap{ + DataSecretName: ptr.To("machine-bootstrap-secret"), + }, + }, + } + g.Expect(env.Create(ctx, nilInfraMachineMachine, client.FieldOwner(classicManager))).To(Succeed()) + // Delete the machine to put it in the deleting state + kcp := &controlplanev1.KubeadmControlPlane{ TypeMeta: metav1.TypeMeta{ Kind: "KubeadmControlPlane", @@ -1557,6 +1638,7 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { Machines: collections.Machines{ inPlaceMutatingMachine.Name: inPlaceMutatingMachine, deletingMachine.Name: deletingMachine, + nilInfraMachineMachine.Name: nilInfraMachineMachine, }, KubeadmConfigs: map[string]*bootstrapv1.KubeadmConfig{ inPlaceMutatingMachine.Name: existingKubeadmConfig, @@ -1574,12 +1656,16 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { // Run syncMachines to clean up managed fields and have proper field ownership // for Machines, InfrastructureMachines and KubeadmConfigs. - reconciler := &KubeadmControlPlaneReconciler{Client: env, ssaCache: ssa.NewCache()} + reconciler := &KubeadmControlPlaneReconciler{ + Client: env, + SecretCachingClient: secretCachingClient, + ssaCache: ssa.NewCache(), + } g.Expect(reconciler.syncMachines(ctx, controlPlane)).To(Succeed()) // The inPlaceMutatingMachine should have cleaned up managed fields. updatedInplaceMutatingMachine := inPlaceMutatingMachine.DeepCopy() - g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInplaceMutatingMachine), updatedInplaceMutatingMachine)) + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInplaceMutatingMachine), updatedInplaceMutatingMachine)).To(Succeed()) // Verify ManagedFields g.Expect(updatedInplaceMutatingMachine.ManagedFields).Should( ContainElement(ssa.MatchManagedFieldsEntry(kcpManagerName, metav1.ManagedFieldsOperationApply)), @@ -1593,7 +1679,7 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { // The InfrastructureMachine should have ownership of "labels" and "annotations" transferred to // "capi-kubeadmcontrolplane" manager. updatedInfraMachine := existingInfraMachine.DeepCopy() - g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInfraMachine), updatedInfraMachine)) + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInfraMachine), updatedInfraMachine)).To(Succeed()) // Verify ManagedFields g.Expect(updatedInfraMachine.GetManagedFields()).Should( @@ -1611,7 +1697,7 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { // The KubeadmConfig should have ownership of "labels" and "annotations" transferred to // "capi-kubeadmcontrolplane" manager. 
updatedKubeadmConfig := existingKubeadmConfig.DeepCopy() - g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedKubeadmConfig), updatedKubeadmConfig)) + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedKubeadmConfig), updatedKubeadmConfig)).To(Succeed()) // Verify ManagedFields g.Expect(updatedKubeadmConfig.GetManagedFields()).Should( @@ -1658,7 +1744,7 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { // Verify in-place mutable fields are updated on the Machine. updatedInplaceMutatingMachine = inPlaceMutatingMachine.DeepCopy() - g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInplaceMutatingMachine), updatedInplaceMutatingMachine)) + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInplaceMutatingMachine), updatedInplaceMutatingMachine)).To(Succeed()) // Verify Labels g.Expect(updatedInplaceMutatingMachine.Labels).Should(Equal(expectedLabels)) // Verify Annotations @@ -1666,26 +1752,26 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { // Verify Node timeout values g.Expect(updatedInplaceMutatingMachine.Spec.NodeDrainTimeout).Should(And( Not(BeNil()), - HaveValue(Equal(*kcp.Spec.MachineTemplate.NodeDrainTimeout)), + HaveValue(BeComparableTo(*kcp.Spec.MachineTemplate.NodeDrainTimeout)), )) g.Expect(updatedInplaceMutatingMachine.Spec.NodeDeletionTimeout).Should(And( Not(BeNil()), - HaveValue(Equal(*kcp.Spec.MachineTemplate.NodeDeletionTimeout)), + HaveValue(BeComparableTo(*kcp.Spec.MachineTemplate.NodeDeletionTimeout)), )) g.Expect(updatedInplaceMutatingMachine.Spec.NodeVolumeDetachTimeout).Should(And( Not(BeNil()), - HaveValue(Equal(*kcp.Spec.MachineTemplate.NodeVolumeDetachTimeout)), + HaveValue(BeComparableTo(*kcp.Spec.MachineTemplate.NodeVolumeDetachTimeout)), )) // Verify that the non in-place mutating fields remain the same. g.Expect(updatedInplaceMutatingMachine.Spec.FailureDomain).Should(Equal(inPlaceMutatingMachine.Spec.FailureDomain)) g.Expect(updatedInplaceMutatingMachine.Spec.ProviderID).Should(Equal(inPlaceMutatingMachine.Spec.ProviderID)) g.Expect(updatedInplaceMutatingMachine.Spec.Version).Should(Equal(inPlaceMutatingMachine.Spec.Version)) - g.Expect(updatedInplaceMutatingMachine.Spec.InfrastructureRef).Should(Equal(inPlaceMutatingMachine.Spec.InfrastructureRef)) - g.Expect(updatedInplaceMutatingMachine.Spec.Bootstrap).Should(Equal(inPlaceMutatingMachine.Spec.Bootstrap)) + g.Expect(updatedInplaceMutatingMachine.Spec.InfrastructureRef).Should(BeComparableTo(inPlaceMutatingMachine.Spec.InfrastructureRef)) + g.Expect(updatedInplaceMutatingMachine.Spec.Bootstrap).Should(BeComparableTo(inPlaceMutatingMachine.Spec.Bootstrap)) // Verify in-place mutable fields are updated on InfrastructureMachine updatedInfraMachine = existingInfraMachine.DeepCopy() - g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInfraMachine), updatedInfraMachine)) + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInfraMachine), updatedInfraMachine)).To(Succeed()) // Verify Labels g.Expect(updatedInfraMachine.GetLabels()).Should(Equal(expectedLabels)) // Verify Annotations @@ -1695,17 +1781,17 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { // Verify in-place mutable fields are updated on the KubeadmConfig. 
updatedKubeadmConfig = existingKubeadmConfig.DeepCopy() - g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedKubeadmConfig), updatedKubeadmConfig)) + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedKubeadmConfig), updatedKubeadmConfig)).To(Succeed()) // Verify Labels g.Expect(updatedKubeadmConfig.GetLabels()).Should(Equal(expectedLabels)) // Verify Annotations g.Expect(updatedKubeadmConfig.GetAnnotations()).Should(Equal(kcp.Spec.MachineTemplate.ObjectMeta.Annotations)) // Verify spec remains the same - g.Expect(updatedKubeadmConfig.Spec).Should(Equal(existingKubeadmConfig.Spec)) + g.Expect(updatedKubeadmConfig.Spec).Should(BeComparableTo(existingKubeadmConfig.Spec)) // The deleting machine should not change. updatedDeletingMachine := deletingMachine.DeepCopy() - g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedDeletingMachine), updatedDeletingMachine)) + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedDeletingMachine), updatedDeletingMachine)).To(Succeed()) // Verify ManagedFields g.Expect(updatedDeletingMachine.ManagedFields).ShouldNot( @@ -1721,7 +1807,7 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { g.Expect(updatedDeletingMachine.Labels).Should(Equal(deletingMachine.Labels)) g.Expect(updatedDeletingMachine.Annotations).Should(Equal(deletingMachine.Annotations)) // Verify the machine spec is unchanged. - g.Expect(updatedDeletingMachine.Spec).Should(Equal(deletingMachine.Spec)) + g.Expect(updatedDeletingMachine.Spec).Should(BeComparableTo(deletingMachine.Spec)) } func TestKubeadmControlPlaneReconciler_updateCoreDNS(t *testing.T) { @@ -1819,7 +1905,6 @@ kubernetesVersion: metav1.16.1`, kubeadmCM.DeepCopy(), } fakeClient := newFakeClient(objs...) - log.SetLogger(klogr.New()) workloadCluster := &fakeWorkloadCluster{ Workload: &internal.Workload{ @@ -1877,7 +1962,6 @@ kubernetesVersion: metav1.16.1`, } fakeClient := newFakeClient(objs...) - log.SetLogger(klogr.New()) workloadCluster := fakeWorkloadCluster{ Workload: &internal.Workload{ @@ -1923,7 +2007,6 @@ kubernetesVersion: metav1.16.1`, } fakeClient := newFakeClient(objs...) - log.SetLogger(klogr.New()) workloadCluster := fakeWorkloadCluster{ Workload: &internal.Workload{ @@ -1984,7 +2067,6 @@ kubernetesVersion: metav1.16.1`, } fakeClient := newFakeClient(objs...) - log.SetLogger(klogr.New()) workloadCluster := fakeWorkloadCluster{ Workload: &internal.Workload{ @@ -2007,15 +2089,18 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) { controllerutil.AddFinalizer(kcp, controlplanev1.KubeadmControlPlaneFinalizer) initObjs := []client.Object{cluster.DeepCopy(), kcp.DeepCopy()} + machines := collections.New() for i := 0; i < 3; i++ { m, _ := createMachineNodePair(fmt.Sprintf("test-%d", i), cluster, kcp, true) initObjs = append(initObjs, m) + machines.Insert(m) } fakeClient := newFakeClient(initObjs...) 
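The reconcileDelete tests in this hunk now receive an internal.ControlPlane scope instead of separate cluster and kcp arguments, so each subtest assembles the owned Machines itself via collections.New() and Insert. A small sketch of that set type, assuming the Filter and HasDeletionTimestamp helpers from sigs.k8s.io/cluster-api/util/collections behave as named:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/collections"
)

func main() {
	// Build the Machines set handed to internal.ControlPlane, mirroring the
	// loops above (object wiring to a Cluster/KCP elided).
	machines := collections.New()
	for i := 0; i < 3; i++ {
		machines.Insert(&clusterv1.Machine{
			ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("test-%d", i)},
		})
	}

	// The set is keyed by Machine name, so subsets can be derived in memory,
	// e.g. the machines that already carry a deletionTimestamp.
	deleting := machines.Filter(collections.HasDeletionTimestamp)
	fmt.Println(len(machines), len(deleting)) // 3 0
}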
r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, + Client: fakeClient, + SecretCachingClient: fakeClient, managementCluster: &fakeManagementCluster{ Management: &internal.Management{Client: fakeClient}, Workload: fakeWorkloadCluster{}, @@ -2024,7 +2109,13 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) { recorder: record.NewFakeRecorder(32), } - result, err := r.reconcileDelete(ctx, cluster, kcp) + controlPlane := &internal.ControlPlane{ + KCP: kcp, + Cluster: cluster, + Machines: machines, + } + + result, err := r.reconcileDelete(ctx, controlPlane) g.Expect(result).To(Equal(ctrl.Result{RequeueAfter: deleteRequeueAfter})) g.Expect(err).ToNot(HaveOccurred()) g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer)) @@ -2033,9 +2124,14 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) { g.Expect(fakeClient.List(ctx, &controlPlaneMachines)).To(Succeed()) g.Expect(controlPlaneMachines.Items).To(BeEmpty()) - result, err = r.reconcileDelete(ctx, cluster, kcp) - g.Expect(result).To(Equal(ctrl.Result{})) - g.Expect(err).NotTo(HaveOccurred()) + controlPlane = &internal.ControlPlane{ + KCP: kcp, + Cluster: cluster, + } + + result, err = r.reconcileDelete(ctx, controlPlane) + g.Expect(result).To(BeComparableTo(ctrl.Result{})) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(kcp.Finalizers).To(BeEmpty()) }) @@ -2057,15 +2153,18 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) { initObjs := []client.Object{cluster.DeepCopy(), kcp.DeepCopy(), workerMachine.DeepCopy()} + machines := collections.New() for i := 0; i < 3; i++ { m, _ := createMachineNodePair(fmt.Sprintf("test-%d", i), cluster, kcp, true) initObjs = append(initObjs, m) + machines.Insert(m) } fakeClient := newFakeClient(initObjs...) r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, + Client: fakeClient, + SecretCachingClient: fakeClient, managementCluster: &fakeManagementCluster{ Management: &internal.Management{Client: fakeClient}, Workload: fakeWorkloadCluster{}, @@ -2073,8 +2172,14 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) { recorder: record.NewFakeRecorder(32), } - result, err := r.reconcileDelete(ctx, cluster, kcp) - g.Expect(result).To(Equal(ctrl.Result{RequeueAfter: deleteRequeueAfter})) + controlPlane := &internal.ControlPlane{ + KCP: kcp, + Cluster: cluster, + Machines: machines, + } + + result, err := r.reconcileDelete(ctx, controlPlane) + g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: deleteRequeueAfter})) g.Expect(err).ToNot(HaveOccurred()) g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer)) @@ -2106,15 +2211,18 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) { initObjs := []client.Object{cluster.DeepCopy(), kcp.DeepCopy(), workerMachinePool.DeepCopy()} + machines := collections.New() for i := 0; i < 3; i++ { m, _ := createMachineNodePair(fmt.Sprintf("test-%d", i), cluster, kcp, true) initObjs = append(initObjs, m) + machines.Insert(m) } fakeClient := newFakeClient(initObjs...) 
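A related test-infrastructure change lands in the newFakeClient helper a little further below: the fake client now registers KubeadmControlPlane as a status subresource. Under controller-runtime v0.15+ semantics (assumed here), that makes status writable only through the status writer, matching the real API server. A minimal sketch:

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
)

func main() {
	scheme := runtime.NewScheme()
	_ = controlplanev1.AddToScheme(scheme)

	kcp := &controlplanev1.KubeadmControlPlane{
		ObjectMeta: metav1.ObjectMeta{Name: "kcp", Namespace: "default"},
	}

	c := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(kcp).
		WithStatusSubresource(&controlplanev1.KubeadmControlPlane{}).
		Build()

	// Status now has to go through the status writer, as the reconciler's
	// patch helper does; a plain c.Update would silently drop status changes.
	kcp.Status.ObservedGeneration = 2
	if err := c.Status().Update(context.Background(), kcp); err != nil {
		panic(err)
	}
}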
r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, + Client: fakeClient, + SecretCachingClient: fakeClient, managementCluster: &fakeManagementCluster{ Management: &internal.Management{Client: fakeClient}, Workload: fakeWorkloadCluster{}, @@ -2122,8 +2230,14 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) { recorder: record.NewFakeRecorder(32), } - result, err := r.reconcileDelete(ctx, cluster, kcp) - g.Expect(result).To(Equal(ctrl.Result{RequeueAfter: deleteRequeueAfter})) + controlPlane := &internal.ControlPlane{ + KCP: kcp, + Cluster: cluster, + Machines: machines, + } + + result, err := r.reconcileDelete(ctx, controlPlane) + g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: deleteRequeueAfter})) g.Expect(err).ToNot(HaveOccurred()) g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer)) @@ -2145,7 +2259,8 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) { fakeClient := newFakeClient(cluster.DeepCopy(), kcp.DeepCopy()) r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, + Client: fakeClient, + SecretCachingClient: fakeClient, managementCluster: &fakeManagementCluster{ Management: &internal.Management{Client: fakeClient}, Workload: fakeWorkloadCluster{}, @@ -2153,9 +2268,14 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) { recorder: record.NewFakeRecorder(32), } - result, err := r.reconcileDelete(ctx, cluster, kcp) - g.Expect(result).To(Equal(ctrl.Result{})) - g.Expect(err).NotTo(HaveOccurred()) + controlPlane := &internal.ControlPlane{ + KCP: kcp, + Cluster: cluster, + } + + result, err := r.reconcileDelete(ctx, controlPlane) + g.Expect(result).To(BeComparableTo(ctrl.Result{})) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(kcp.Finalizers).To(BeEmpty()) }) } @@ -2165,7 +2285,7 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) { func newFakeClient(initObjs ...client.Object) client.Client { return &fakeClient{ startTime: time.Now(), - Client: fake.NewClientBuilder().WithObjects(initObjs...).Build(), + Client: fake.NewClientBuilder().WithObjects(initObjs...).WithStatusSubresource(&controlplanev1.KubeadmControlPlane{}).Build(), } } @@ -2230,7 +2350,7 @@ func createClusterWithControlPlane(namespace string) (*clusterv1.Cluster, *contr APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", }, }, - Replicas: pointer.Int32(int32(3)), + Replicas: ptr.To[int32](int32(3)), Version: "v1.16.6", RolloutStrategy: &controlplanev1.RolloutStrategy{ Type: "RollingUpdate", @@ -2299,7 +2419,10 @@ func createMachineNodePair(name string, cluster *clusterv1.Cluster, kcp *control }, }, } - machine.Default() + webhook := webhooks.Machine{} + if err := webhook.Default(ctx, machine); err != nil { + panic(err) + } node := &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ @@ -2321,6 +2444,10 @@ func createMachineNodePair(name string, cluster *clusterv1.Cluster, kcp *control } func setMachineHealthy(m *clusterv1.Machine) { + m.Status.NodeRef = &corev1.ObjectReference{ + Kind: "Node", + Name: "node-1", + } conditions.MarkTrue(m, controlplanev1.MachineAPIServerPodHealthyCondition) conditions.MarkTrue(m, controlplanev1.MachineControllerManagerPodHealthyCondition) conditions.MarkTrue(m, controlplanev1.MachineSchedulerPodHealthyCondition) diff --git a/controlplane/kubeadm/internal/controllers/fakes_test.go b/controlplane/kubeadm/internal/controllers/fakes_test.go index 7f5a962e2db8..cf9fcbafe66e 100644 --- a/controlplane/kubeadm/internal/controllers/fakes_test.go +++ 
b/controlplane/kubeadm/internal/controllers/fakes_test.go @@ -20,7 +20,7 @@ import ( "context" "time" - "github.com/blang/semver" + "github.com/blang/semver/v4" "github.com/pkg/errors" "sigs.k8s.io/controller-runtime/pkg/client" @@ -96,6 +96,10 @@ func (f fakeWorkloadCluster) AllowBootstrapTokensToGetNodes(_ context.Context) e return nil } +func (f fakeWorkloadCluster) AllowClusterAdminPermissions(_ context.Context, _ semver.Version) error { + return nil +} + func (f fakeWorkloadCluster) ReconcileKubeletRBACRole(_ context.Context, _ semver.Version) error { return nil } @@ -104,11 +108,11 @@ func (f fakeWorkloadCluster) ReconcileKubeletRBACBinding(_ context.Context, _ se return nil } -func (f fakeWorkloadCluster) UpdateKubernetesVersionInKubeadmConfigMap(_ context.Context, _ semver.Version) error { +func (f fakeWorkloadCluster) UpdateKubernetesVersionInKubeadmConfigMap(semver.Version) func(*bootstrapv1.ClusterConfiguration) { return nil } -func (f fakeWorkloadCluster) UpdateEtcdVersionInKubeadmConfigMap(_ context.Context, _, _ string, _ semver.Version) error { +func (f fakeWorkloadCluster) UpdateEtcdLocalInKubeadmConfigMap(*bootstrapv1.LocalEtcd) func(*bootstrapv1.ClusterConfiguration) { return nil } @@ -128,13 +132,17 @@ func (f fakeWorkloadCluster) EtcdMembers(_ context.Context) ([]string, error) { return f.EtcdMembersResult, nil } +func (f fakeWorkloadCluster) UpdateClusterConfiguration(context.Context, semver.Version, ...func(*bootstrapv1.ClusterConfiguration)) error { + return nil +} + type fakeMigrator struct { migrateCalled bool migrateErr error migratedCorefile string } -func (m *fakeMigrator) Migrate(_, _, _ string, _ bool) (string, error) { +func (m *fakeMigrator) Migrate(string, string, string, bool) (string, error) { m.migrateCalled = true if m.migrateErr != nil { return "", m.migrateErr diff --git a/controlplane/kubeadm/internal/controllers/helpers.go b/controlplane/kubeadm/internal/controllers/helpers.go index 2d59f71dea84..f7e3a93f758d 100644 --- a/controlplane/kubeadm/internal/controllers/helpers.go +++ b/controlplane/kubeadm/internal/controllers/helpers.go @@ -29,7 +29,6 @@ import ( "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -48,22 +47,22 @@ import ( "sigs.k8s.io/cluster-api/util/secret" ) -func (r *KubeadmControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane) (ctrl.Result, error) { +func (r *KubeadmControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, controlPlane *internal.ControlPlane) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) - endpoint := cluster.Spec.ControlPlaneEndpoint + endpoint := controlPlane.Cluster.Spec.ControlPlaneEndpoint if endpoint.IsZero() { return ctrl.Result{}, nil } - controllerOwnerRef := *metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane")) - clusterName := util.ObjectKey(cluster) - configSecret, err := secret.GetFromNamespacedName(ctx, r.Client, clusterName, secret.Kubeconfig) + controllerOwnerRef := *metav1.NewControllerRef(controlPlane.KCP, controlplanev1.GroupVersion.WithKind(kubeadmControlPlaneKind)) + clusterName := util.ObjectKey(controlPlane.Cluster) + configSecret, err := secret.GetFromNamespacedName(ctx, r.SecretCachingClient, clusterName, secret.Kubeconfig) switch { case apierrors.IsNotFound(err): createErr := 
kubeconfig.CreateSecretWithOwner( ctx, - r.Client, + r.SecretCachingClient, clusterName, endpoint.String(), controllerOwnerRef, @@ -77,12 +76,12 @@ func (r *KubeadmControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, return ctrl.Result{}, errors.Wrap(err, "failed to retrieve kubeconfig Secret") } - if err := r.adoptKubeconfigSecret(ctx, cluster, configSecret, kcp); err != nil { + if err := r.adoptKubeconfigSecret(ctx, configSecret, controlPlane.KCP); err != nil { return ctrl.Result{}, err } // only do rotation on owned secrets - if !util.IsControlledBy(configSecret, kcp) { + if !util.IsControlledBy(configSecret, controlPlane.KCP) { return ctrl.Result{}, nil } @@ -92,7 +91,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, } if needsRotation { - log.Info("rotating kubeconfig secret") + log.Info("Rotating kubeconfig secret") if err := kubeconfig.RegenerateSecret(ctx, r.Client, configSecret); err != nil { return ctrl.Result{}, errors.Wrap(err, "failed to regenerate kubeconfig") } @@ -102,46 +101,32 @@ func (r *KubeadmControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, } // Ensure the kubeconfig Secret has an owner reference to the control plane if it is not a user-provided secret. -func (r *KubeadmControlPlaneReconciler) adoptKubeconfigSecret(ctx context.Context, cluster *clusterv1.Cluster, configSecret *corev1.Secret, kcp *controlplanev1.KubeadmControlPlane) error { - log := ctrl.LoggerFrom(ctx) - controller := metav1.GetControllerOf(configSecret) - - // If the Type doesn't match the CAPI-created secret type this is a no-op. - if configSecret.Type != clusterv1.ClusterSecretType { - return nil - } - // If the secret is already controlled by KCP this is a no-op. - if controller != nil && controller.Kind == "KubeadmControlPlane" { - return nil - } - log.Info("Adopting KubeConfig secret", "Secret", klog.KObj(configSecret)) - patch, err := patch.NewHelper(configSecret, r.Client) +func (r *KubeadmControlPlaneReconciler) adoptKubeconfigSecret(ctx context.Context, configSecret *corev1.Secret, kcp *controlplanev1.KubeadmControlPlane) (reterr error) { + patchHelper, err := patch.NewHelper(configSecret, r.Client) if err != nil { - return errors.Wrap(err, "failed to create patch helper for the kubeconfig secret") - } - - // If the kubeconfig secret was created by v1alpha2 controllers, and thus it has the Cluster as the owner instead of KCP. - // In this case remove the ownerReference to the Cluster. - if util.IsOwnedByObject(configSecret, cluster) { - configSecret.SetOwnerReferences(util.RemoveOwnerRef(configSecret.OwnerReferences, metav1.OwnerReference{ - APIVersion: clusterv1.GroupVersion.String(), - Kind: "Cluster", - Name: cluster.Name, - UID: cluster.UID, - })) + return err } + defer func() { + if err := patchHelper.Patch(ctx, configSecret); err != nil { + reterr = kerrors.NewAggregate([]error{reterr, err}) + } + }() + controller := metav1.GetControllerOf(configSecret) - // Remove the current controller if one exists. - if controller != nil { - configSecret.SetOwnerReferences(util.RemoveOwnerRef(configSecret.OwnerReferences, *controller)) + // If the current controller is KCP, ensure the owner reference is up to date and return early. + // Note: This ensures secrets created prior to v1alpha4 are updated to have the correct owner reference apiVersion.
+ if controller != nil && controller.Kind == kubeadmControlPlaneKind { + configSecret.SetOwnerReferences(util.EnsureOwnerRef(configSecret.GetOwnerReferences(), *metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind(kubeadmControlPlaneKind)))) + return nil } - // Add the KubeadmControlPlane as the controller for this secret. - configSecret.OwnerReferences = util.EnsureOwnerRef(configSecret.OwnerReferences, - *metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane"))) - - if err := patch.Patch(ctx, configSecret); err != nil { - return errors.Wrap(err, "failed to patch the kubeconfig secret") + // If secret type is a CAPI-created secret ensure the owner reference is to KCP. + if configSecret.Type == clusterv1.ClusterSecretType { + // Remove the current controller if one exists and ensure KCP is the controller of the secret. + if controller != nil { + configSecret.SetOwnerReferences(util.RemoveOwnerRef(configSecret.GetOwnerReferences(), *controller)) + } + configSecret.SetOwnerReferences(util.EnsureOwnerRef(configSecret.GetOwnerReferences(), *metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind(kubeadmControlPlaneKind)))) } return nil } @@ -180,20 +165,31 @@ func (r *KubeadmControlPlaneReconciler) reconcileExternalReference(ctx context.C func (r *KubeadmControlPlaneReconciler) cloneConfigsAndGenerateMachine(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, bootstrapSpec *bootstrapv1.KubeadmConfigSpec, failureDomain *string) error { var errs []error + // Compute desired Machine + machine, err := r.computeDesiredMachine(kcp, cluster, failureDomain, nil) + if err != nil { + return errors.Wrap(err, "failed to create Machine: failed to compute desired Machine") + } + // Since the cloned resource should eventually have a controller ref for the Machine, we create an // OwnerReference here without the Controller field set infraCloneOwner := &metav1.OwnerReference{ APIVersion: controlplanev1.GroupVersion.String(), - Kind: "KubeadmControlPlane", + Kind: kubeadmControlPlaneKind, Name: kcp.Name, UID: kcp.UID, } + infraMachineName := machine.Name + if r.DeprecatedInfraMachineNaming { + infraMachineName = names.SimpleNameGenerator.GenerateName(kcp.Spec.MachineTemplate.InfrastructureRef.Name + "-") + } // Clone the infrastructure template infraRef, err := external.CreateFromTemplate(ctx, &external.CreateFromTemplateInput{ Client: r.Client, TemplateRef: &kcp.Spec.MachineTemplate.InfrastructureRef, Namespace: kcp.Namespace, + Name: infraMachineName, OwnerRef: infraCloneOwner, ClusterName: cluster.Name, Labels: internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), @@ -205,9 +201,10 @@ func (r *KubeadmControlPlaneReconciler) cloneConfigsAndGenerateMachine(ctx conte clusterv1.ConditionSeverityError, err.Error()) return errors.Wrap(err, "failed to clone infrastructure template") } + machine.Spec.InfrastructureRef = *infraRef // Clone the bootstrap configuration - bootstrapRef, err := r.generateKubeadmConfig(ctx, kcp, cluster, bootstrapSpec) + bootstrapRef, err := r.generateKubeadmConfig(ctx, kcp, cluster, bootstrapSpec, machine.Name) if err != nil { conditions.MarkFalse(kcp, controlplanev1.MachinesCreatedCondition, controlplanev1.BootstrapTemplateCloningFailedReason, clusterv1.ConditionSeverityError, err.Error()) @@ -216,7 +213,9 @@ func (r *KubeadmControlPlaneReconciler) cloneConfigsAndGenerateMachine(ctx conte // Only proceed to generating the Machine if we haven't encountered an error if len(errs) == 0 { 
- if err := r.createMachine(ctx, kcp, cluster, infraRef, bootstrapRef, failureDomain); err != nil { + machine.Spec.Bootstrap.ConfigRef = bootstrapRef + + if err := r.createMachine(ctx, kcp, machine); err != nil { conditions.MarkFalse(kcp, controlplanev1.MachinesCreatedCondition, controlplanev1.MachineGenerationFailedReason, clusterv1.ConditionSeverityError, err.Error()) errs = append(errs, errors.Wrap(err, "failed to create Machine")) @@ -256,18 +255,18 @@ func (r *KubeadmControlPlaneReconciler) cleanupFromGeneration(ctx context.Contex return kerrors.NewAggregate(errs) } -func (r *KubeadmControlPlaneReconciler) generateKubeadmConfig(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster, spec *bootstrapv1.KubeadmConfigSpec) (*corev1.ObjectReference, error) { +func (r *KubeadmControlPlaneReconciler) generateKubeadmConfig(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster, spec *bootstrapv1.KubeadmConfigSpec, name string) (*corev1.ObjectReference, error) { // Create an owner reference without a controller reference because the owning controller is the machine controller owner := metav1.OwnerReference{ APIVersion: controlplanev1.GroupVersion.String(), - Kind: "KubeadmControlPlane", + Kind: kubeadmControlPlaneKind, Name: kcp.Name, UID: kcp.UID, } bootstrapConfig := &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ - Name: names.SimpleNameGenerator.GenerateName(kcp.Name + "-"), + Name: name, Namespace: kcp.Namespace, Labels: internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), Annotations: kcp.Spec.MachineTemplate.ObjectMeta.Annotations, @@ -306,17 +305,13 @@ func (r *KubeadmControlPlaneReconciler) updateExternalObject(ctx context.Context // Update annotations updatedObject.SetAnnotations(kcp.Spec.MachineTemplate.ObjectMeta.Annotations) - if err := ssa.Patch(ctx, r.Client, kcpManagerName, updatedObject); err != nil { + if err := ssa.Patch(ctx, r.Client, kcpManagerName, updatedObject, ssa.WithCachingProxy{Cache: r.ssaCache, Original: obj}); err != nil { return errors.Wrapf(err, "failed to update %s", obj.GetObjectKind().GroupVersionKind().Kind) } return nil } -func (r *KubeadmControlPlaneReconciler) createMachine(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster, infraRef, bootstrapRef *corev1.ObjectReference, failureDomain *string) error { - machine, err := r.computeDesiredMachine(kcp, cluster, infraRef, bootstrapRef, failureDomain, nil) - if err != nil { - return errors.Wrap(err, "failed to create Machine: failed to compute desired Machine") - } +func (r *KubeadmControlPlaneReconciler) createMachine(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, machine *clusterv1.Machine) error { if err := ssa.Patch(ctx, r.Client, kcpManagerName, machine); err != nil { return errors.Wrap(err, "failed to create Machine") } @@ -327,11 +322,7 @@ func (r *KubeadmControlPlaneReconciler) createMachine(ctx context.Context, kcp * } func (r *KubeadmControlPlaneReconciler) updateMachine(ctx context.Context, machine *clusterv1.Machine, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster) (*clusterv1.Machine, error) { - updatedMachine, err := r.computeDesiredMachine( - kcp, cluster, - &machine.Spec.InfrastructureRef, machine.Spec.Bootstrap.ConfigRef, - machine.Spec.FailureDomain, machine, - ) + updatedMachine, err := r.computeDesiredMachine(kcp, cluster, machine.Spec.FailureDomain, machine) if err != nil { return nil, errors.Wrap(err, "failed to update Machine: 
failed to compute desired Machine") } @@ -351,7 +342,7 @@ func (r *KubeadmControlPlaneReconciler) updateMachine(ctx context.Context, machi // There are small differences in how we calculate the Machine depending on if it // is a create or update. Example: for a new Machine we have to calculate a new name, // while for an existing Machine we have to use the name of the existing Machine. -func (r *KubeadmControlPlaneReconciler) computeDesiredMachine(kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster, infraRef, bootstrapRef *corev1.ObjectReference, failureDomain *string, existingMachine *clusterv1.Machine) (*clusterv1.Machine, error) { +func (r *KubeadmControlPlaneReconciler) computeDesiredMachine(kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster, failureDomain *string, existingMachine *clusterv1.Machine) (*clusterv1.Machine, error) { var machineName string var machineUID types.UID var version *string @@ -408,19 +399,15 @@ func (r *KubeadmControlPlaneReconciler) computeDesiredMachine(kcp *controlplanev Namespace: kcp.Namespace, // Note: by setting the ownerRef on creation we signal to the Machine controller that this is not a stand-alone Machine. OwnerReferences: []metav1.OwnerReference{ - *metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane")), + *metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind(kubeadmControlPlaneKind)), }, Labels: map[string]string{}, Annotations: map[string]string{}, }, Spec: clusterv1.MachineSpec{ - ClusterName: cluster.Name, - Version: version, - FailureDomain: failureDomain, - InfrastructureRef: *infraRef, - Bootstrap: clusterv1.Bootstrap{ - ConfigRef: bootstrapRef, - }, + ClusterName: cluster.Name, + Version: version, + FailureDomain: failureDomain, }, } @@ -446,5 +433,10 @@ func (r *KubeadmControlPlaneReconciler) computeDesiredMachine(kcp *controlplanev desiredMachine.Spec.NodeDeletionTimeout = kcp.Spec.MachineTemplate.NodeDeletionTimeout desiredMachine.Spec.NodeVolumeDetachTimeout = kcp.Spec.MachineTemplate.NodeVolumeDetachTimeout + if existingMachine != nil { + desiredMachine.Spec.InfrastructureRef = existingMachine.Spec.InfrastructureRef + desiredMachine.Spec.Bootstrap.ConfigRef = existingMachine.Spec.Bootstrap.ConfigRef + } + return desiredMachine, nil } diff --git a/controlplane/kubeadm/internal/controllers/helpers_test.go b/controlplane/kubeadm/internal/controllers/helpers_test.go index a5775b46d38b..ffde4eb284f0 100644 --- a/controlplane/kubeadm/internal/controllers/helpers_test.go +++ b/controlplane/kubeadm/internal/controllers/helpers_test.go @@ -26,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -34,7 +34,7 @@ import ( bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/external" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" - "sigs.k8s.io/cluster-api/internal/test/builder" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/kubeconfig" "sigs.k8s.io/cluster-api/util/secret" @@ -74,11 +74,17 @@ func TestReconcileKubeconfigEmptyAPIEndpoints(t *testing.T) { fakeClient := newFakeClient(kcp.DeepCopy()) r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + 
Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), } - result, err := r.reconcileKubeconfig(ctx, cluster, kcp) + controlPlane := &internal.ControlPlane{ + KCP: kcp, + Cluster: cluster, + } + + result, err := r.reconcileKubeconfig(ctx, controlPlane) g.Expect(err).ToNot(HaveOccurred()) g.Expect(result).To(BeZero()) @@ -123,89 +129,26 @@ func TestReconcileKubeconfigMissingCACertificate(t *testing.T) { fakeClient := newFakeClient(kcp.DeepCopy()) r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), } - result, err := r.reconcileKubeconfig(ctx, cluster, kcp) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(result).To(Equal(ctrl.Result{RequeueAfter: dependentCertRequeueAfter})) - - kubeconfigSecret := &corev1.Secret{} - secretName := client.ObjectKey{ - Namespace: metav1.NamespaceDefault, - Name: secret.Name(cluster.Name, secret.Kubeconfig), + controlPlane := &internal.ControlPlane{ + KCP: kcp, + Cluster: cluster, } - g.Expect(r.Client.Get(ctx, secretName, kubeconfigSecret)).To(MatchError(ContainSubstring("not found"))) -} - -func TestReconcileKubeconfigSecretAdoptsV1alpha2Secrets(t *testing.T) { - g := NewWithT(t) - cluster := &clusterv1.Cluster{ - TypeMeta: metav1.TypeMeta{ - Kind: "Cluster", - APIVersion: clusterv1.GroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: metav1.NamespaceDefault, - }, - Spec: clusterv1.ClusterSpec{ - ControlPlaneEndpoint: clusterv1.APIEndpoint{Host: "test.local", Port: 8443}, - }, - } - - kcp := &controlplanev1.KubeadmControlPlane{ - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmControlPlane", - APIVersion: controlplanev1.GroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: metav1.NamespaceDefault, - }, - Spec: controlplanev1.KubeadmControlPlaneSpec{ - Version: "v1.16.6", - }, - } - - existingKubeconfigSecret := kubeconfig.GenerateSecretWithOwner( - client.ObjectKey{Name: "foo", Namespace: metav1.NamespaceDefault}, - []byte{}, - metav1.OwnerReference{ - APIVersion: clusterv1.GroupVersion.String(), - Kind: "Cluster", - Name: cluster.Name, - UID: cluster.UID, - }, // the Cluster ownership defines v1alpha2 controlled secrets - ) - - fakeClient := newFakeClient(kcp.DeepCopy(), existingKubeconfigSecret.DeepCopy()) - r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), - } - - result, err := r.reconcileKubeconfig(ctx, cluster, kcp) + result, err := r.reconcileKubeconfig(ctx, controlPlane) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(result).To(Equal(ctrl.Result{})) + g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: dependentCertRequeueAfter})) kubeconfigSecret := &corev1.Secret{} secretName := client.ObjectKey{ Namespace: metav1.NamespaceDefault, Name: secret.Name(cluster.Name, secret.Kubeconfig), } - g.Expect(r.Client.Get(ctx, secretName, kubeconfigSecret)).To(Succeed()) - g.Expect(kubeconfigSecret.Labels).To(Equal(existingKubeconfigSecret.Labels)) - g.Expect(kubeconfigSecret.Data).To(Equal(existingKubeconfigSecret.Data)) - g.Expect(kubeconfigSecret.OwnerReferences).ToNot(ContainElement(metav1.OwnerReference{ - APIVersion: clusterv1.GroupVersion.String(), - Kind: "Cluster", - Name: cluster.Name, - UID: cluster.UID, - })) - g.Expect(kubeconfigSecret.OwnerReferences).To(ContainElement(*metav1.NewControllerRef(kcp, 
controlplanev1.GroupVersion.WithKind("KubeadmControlPlane")))) + g.Expect(r.Client.Get(ctx, secretName, kubeconfigSecret)).To(MatchError(ContainSubstring("not found"))) } func TestReconcileKubeconfigSecretDoesNotAdoptsUserSecrets(t *testing.T) { @@ -258,11 +201,17 @@ func TestReconcileKubeconfigSecretDoesNotAdoptsUserSecrets(t *testing.T) { fakeClient := newFakeClient(kcp.DeepCopy(), existingKubeconfigSecret.DeepCopy()) r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), + } + + controlPlane := &internal.ControlPlane{ + KCP: kcp, + Cluster: cluster, } - result, err := r.reconcileKubeconfig(ctx, cluster, kcp) + result, err := r.reconcileKubeconfig(ctx, controlPlane) g.Expect(err).To(Succeed()) g.Expect(result).To(BeZero()) @@ -318,12 +267,19 @@ func TestKubeadmControlPlaneReconciler_reconcileKubeconfig(t *testing.T) { fakeClient := newFakeClient(kcp.DeepCopy(), existingCACertSecret.DeepCopy()) r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), } - result, err := r.reconcileKubeconfig(ctx, cluster, kcp) + + controlPlane := &internal.ControlPlane{ + KCP: kcp, + Cluster: cluster, + } + + result, err := r.reconcileKubeconfig(ctx, controlPlane) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(result).To(Equal(ctrl.Result{})) + g.Expect(result).To(BeComparableTo(ctrl.Result{})) kubeconfigSecret := &corev1.Secret{} secretName := client.ObjectKey{ @@ -342,7 +298,7 @@ func TestCloneConfigsAndGenerateMachine(t *testing.T) { t.Log("Creating the namespace") ns, err := env.CreateNamespace(ctx, "test-applykubeadmconfig") - g.Expect(err).To(BeNil()) + g.Expect(err).ToNot(HaveOccurred()) return ns } @@ -404,8 +360,9 @@ func TestCloneConfigsAndGenerateMachine(t *testing.T) { } r := &KubeadmControlPlaneReconciler{ - Client: env, - recorder: record.NewFakeRecorder(32), + Client: env, + SecretCachingClient: secretCachingClient, + recorder: record.NewFakeRecorder(32), } bootstrapSpec := &bootstrapv1.KubeadmConfigSpec{ @@ -417,23 +374,24 @@ func TestCloneConfigsAndGenerateMachine(t *testing.T) { g.Expect(env.GetAPIReader().List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed()) g.Expect(machineList.Items).To(HaveLen(1)) - for _, m := range machineList.Items { + for i := range machineList.Items { + m := machineList.Items[i] g.Expect(m.Namespace).To(Equal(cluster.Namespace)) g.Expect(m.Name).NotTo(BeEmpty()) g.Expect(m.Name).To(HavePrefix(kcp.Name)) infraObj, err := external.Get(ctx, r.Client, &m.Spec.InfrastructureRef, m.Spec.InfrastructureRef.Namespace) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromNameAnnotation, genericInfrastructureMachineTemplate.GetName())) g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromGroupKindAnnotation, genericInfrastructureMachineTemplate.GroupVersionKind().GroupKind().String())) g.Expect(m.Spec.InfrastructureRef.Namespace).To(Equal(cluster.Namespace)) - g.Expect(m.Spec.InfrastructureRef.Name).To(HavePrefix(genericInfrastructureMachineTemplate.GetName())) + g.Expect(m.Spec.InfrastructureRef.Name).To(Equal(m.Name)) g.Expect(m.Spec.InfrastructureRef.APIVersion).To(Equal(genericInfrastructureMachineTemplate.GetAPIVersion())) 
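Aside: the two Equal(m.Name) assertions above encode the naming convention this change relies on — the Machine, its InfrastructureMachine, and its KubeadmConfig now share one generated name, so each reference is deterministic. A minimal sketch of that convention, assuming apimachinery's SimpleNameGenerator (illustrative only, not the actual KCP implementation):

package naming

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apiserver/pkg/storage/names"
)

// sharedNameRefs sketches the convention the assertions check: one generated
// name is reused for the Machine and for the references to the objects
// cloned alongside it, so the refs can be rebuilt from the Machine alone.
func sharedNameRefs(kcpName, namespace, infraAPIVersion, bootstrapAPIVersion string) (string, corev1.ObjectReference, corev1.ObjectReference) {
	machineName := names.SimpleNameGenerator.GenerateName(kcpName + "-")
	infraRef := corev1.ObjectReference{
		APIVersion: infraAPIVersion,
		Kind:       "GenericInfrastructureMachine",
		Namespace:  namespace,
		Name:       machineName, // same name as the Machine
	}
	bootstrapRef := corev1.ObjectReference{
		APIVersion: bootstrapAPIVersion,
		Kind:       "KubeadmConfig",
		Namespace:  namespace,
		Name:       machineName, // same name as the Machine
	}
	return machineName, infraRef, bootstrapRef
}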
g.Expect(m.Spec.InfrastructureRef.Kind).To(Equal("GenericInfrastructureMachine")) g.Expect(m.Spec.Bootstrap.ConfigRef.Namespace).To(Equal(cluster.Namespace)) - g.Expect(m.Spec.Bootstrap.ConfigRef.Name).To(HavePrefix(kcp.Name)) + g.Expect(m.Spec.Bootstrap.ConfigRef.Name).To(Equal(m.Name)) g.Expect(m.Spec.Bootstrap.ConfigRef.APIVersion).To(Equal(bootstrapv1.GroupVersion.String())) g.Expect(m.Spec.Bootstrap.ConfigRef.Kind).To(Equal("KubeadmConfig")) } @@ -488,8 +446,9 @@ func TestCloneConfigsAndGenerateMachineFail(t *testing.T) { fakeClient := newFakeClient(cluster.DeepCopy(), kcp.DeepCopy(), genericMachineTemplate.DeepCopy()) r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), } bootstrapSpec := &bootstrapv1.KubeadmConfigSpec{ @@ -566,21 +525,16 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { t.Run("should return the correct Machine object when creating a new Machine", func(t *testing.T) { g := NewWithT(t) - failureDomain := pointer.String("fd1") + failureDomain := ptr.To("fd1") createdMachine, err := (&KubeadmControlPlaneReconciler{}).computeDesiredMachine( kcp, cluster, - infraRef, bootstrapRef, failureDomain, nil, ) - g.Expect(err).To(BeNil()) + g.Expect(err).ToNot(HaveOccurred()) expectedMachineSpec := clusterv1.MachineSpec{ - ClusterName: cluster.Name, - Version: pointer.String(kcp.Spec.Version), - Bootstrap: clusterv1.Bootstrap{ - ConfigRef: bootstrapRef, - }, - InfrastructureRef: *infraRef, + ClusterName: cluster.Name, + Version: ptr.To(kcp.Spec.Version), FailureDomain: failureDomain, NodeDrainTimeout: kcp.Spec.MachineTemplate.NodeDrainTimeout, NodeDeletionTimeout: kcp.Spec.MachineTemplate.NodeDeletionTimeout, @@ -590,7 +544,7 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { g.Expect(createdMachine.Namespace).To(Equal(kcp.Namespace)) g.Expect(createdMachine.OwnerReferences).To(HaveLen(1)) g.Expect(createdMachine.OwnerReferences).To(ContainElement(*metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane")))) - g.Expect(createdMachine.Spec).To(Equal(expectedMachineSpec)) + g.Expect(createdMachine.Spec).To(BeComparableTo(expectedMachineSpec)) // Verify that the machineTemplate.ObjectMeta has been propagated to the Machine. // Verify labels. @@ -624,8 +578,8 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { // to verify that for an existing machine we do not override this information. 
existingClusterConfigurationString := "existing-cluster-configuration-information" remediationData := "remediation-data" - failureDomain := pointer.String("fd-1") - machineVersion := pointer.String("v1.25.3") + failureDomain := ptr.To("fd-1") + machineVersion := ptr.To("v1.25.3") existingMachine := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: machineName, @@ -641,15 +595,18 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { NodeDrainTimeout: duration10s, NodeDeletionTimeout: duration10s, NodeVolumeDetachTimeout: duration10s, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: bootstrapRef, + }, + InfrastructureRef: *infraRef, }, } updatedMachine, err := (&KubeadmControlPlaneReconciler{}).computeDesiredMachine( kcp, cluster, - infraRef, bootstrapRef, existingMachine.Spec.FailureDomain, existingMachine, ) - g.Expect(err).To(BeNil()) + g.Expect(err).ToNot(HaveOccurred()) expectedMachineSpec := clusterv1.MachineSpec{ ClusterName: cluster.Name, @@ -666,7 +623,7 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { g.Expect(updatedMachine.Namespace).To(Equal(kcp.Namespace)) g.Expect(updatedMachine.OwnerReferences).To(HaveLen(1)) g.Expect(updatedMachine.OwnerReferences).To(ContainElement(*metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane")))) - g.Expect(updatedMachine.Spec).To(Equal(expectedMachineSpec)) + g.Expect(updatedMachine.Spec).To(BeComparableTo(expectedMachineSpec)) // Verify the Name and UID of the Machine remain unchanged g.Expect(updatedMachine.Name).To(Equal(machineName)) @@ -725,14 +682,15 @@ func TestKubeadmControlPlaneReconciler_generateKubeadmConfig(t *testing.T) { } r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), } - got, err := r.generateKubeadmConfig(ctx, kcp, cluster, spec.DeepCopy()) - g.Expect(err).NotTo(HaveOccurred()) + got, err := r.generateKubeadmConfig(ctx, kcp, cluster, spec.DeepCopy(), "kubeadmconfig-name") + g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).NotTo(BeNil()) - g.Expect(got.Name).To(HavePrefix(kcp.Name)) + g.Expect(got.Name).To(Equal("kubeadmconfig-name")) g.Expect(got.Namespace).To(Equal(kcp.Namespace)) g.Expect(got.Kind).To(Equal(expectedReferenceKind)) g.Expect(got.APIVersion).To(Equal(expectedReferenceAPIVersion)) @@ -742,7 +700,7 @@ func TestKubeadmControlPlaneReconciler_generateKubeadmConfig(t *testing.T) { g.Expect(fakeClient.Get(ctx, key, bootstrapConfig)).To(Succeed()) g.Expect(bootstrapConfig.OwnerReferences).To(HaveLen(1)) g.Expect(bootstrapConfig.OwnerReferences).To(ContainElement(expectedOwner)) - g.Expect(bootstrapConfig.Spec).To(Equal(spec)) + g.Expect(bootstrapConfig.Spec).To(BeComparableTo(spec)) } func TestKubeadmControlPlaneReconciler_adoptKubeconfigSecret(t *testing.T) { @@ -752,15 +710,13 @@ func TestKubeadmControlPlaneReconciler_adoptKubeconfigSecret(t *testing.T) { UID: "5", Kind: "OtherController", APIVersion: clusterv1.GroupVersion.String(), - Controller: pointer.Bool(true), - BlockOwnerDeletion: pointer.Bool(true), + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), } - clusterName := "test1" - cluster := builder.Cluster(metav1.NamespaceDefault, clusterName).Build() // A KubeadmConfig secret created by CAPI controllers with no owner references. 
capiKubeadmConfigSecretNoOwner := kubeconfig.GenerateSecretWithOwner( - client.ObjectKey{Name: clusterName, Namespace: metav1.NamespaceDefault}, + client.ObjectKey{Name: "test1", Namespace: metav1.NamespaceDefault}, []byte{}, metav1.OwnerReference{}) capiKubeadmConfigSecretNoOwner.OwnerReferences = []metav1.OwnerReference{} @@ -771,7 +727,7 @@ func TestKubeadmControlPlaneReconciler_adoptKubeconfigSecret(t *testing.T) { // A user provided KubeadmConfig secret with no owner reference. userProvidedKubeadmConfigSecretNoOwner := kubeconfig.GenerateSecretWithOwner( - client.ObjectKey{Name: clusterName, Namespace: metav1.NamespaceDefault}, + client.ObjectKey{Name: "test1", Namespace: metav1.NamespaceDefault}, []byte{}, metav1.OwnerReference{}) userProvidedKubeadmConfigSecretNoOwner.Type = corev1.SecretTypeOpaque @@ -787,7 +743,7 @@ func TestKubeadmControlPlaneReconciler_adoptKubeconfigSecret(t *testing.T) { }, ObjectMeta: metav1.ObjectMeta{ Name: "testControlPlane", - Namespace: cluster.Namespace, + Namespace: metav1.NamespaceDefault, }, } tests := []struct { @@ -803,8 +759,8 @@ func TestKubeadmControlPlaneReconciler_adoptKubeconfigSecret(t *testing.T) { UID: kcp.UID, Kind: kcp.Kind, APIVersion: kcp.APIVersion, - Controller: pointer.Bool(true), - BlockOwnerDeletion: pointer.Bool(true), + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), }, }, { @@ -815,8 +771,8 @@ func TestKubeadmControlPlaneReconciler_adoptKubeconfigSecret(t *testing.T) { UID: kcp.UID, Kind: kcp.Kind, APIVersion: kcp.APIVersion, - Controller: pointer.Bool(true), - BlockOwnerDeletion: pointer.Bool(true), + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), }, }, { @@ -831,16 +787,16 @@ func TestKubeadmControlPlaneReconciler_adoptKubeconfigSecret(t *testing.T) { }, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fakeClient := newFakeClient(cluster, kcp, tt.configSecret) + t.Run(tt.name, func(*testing.T) { + fakeClient := newFakeClient(kcp, tt.configSecret) r := &KubeadmControlPlaneReconciler{ - APIReader: fakeClient, - Client: fakeClient, + Client: fakeClient, + SecretCachingClient: fakeClient, } - g.Expect(r.adoptKubeconfigSecret(ctx, cluster, tt.configSecret, kcp)).To(Succeed()) + g.Expect(r.adoptKubeconfigSecret(ctx, tt.configSecret, kcp)).To(Succeed()) actualSecret := &corev1.Secret{} - g.Expect(fakeClient.Get(ctx, client.ObjectKey{Namespace: tt.configSecret.Namespace, Name: tt.configSecret.Namespace}, actualSecret)) - g.Expect(tt.configSecret.GetOwnerReferences()).To(ConsistOf(tt.expectedOwnerRef)) + g.Expect(fakeClient.Get(ctx, client.ObjectKey{Namespace: tt.configSecret.Namespace, Name: tt.configSecret.Name}, actualSecret)).To(Succeed()) + g.Expect(actualSecret.GetOwnerReferences()).To(ConsistOf(tt.expectedOwnerRef)) }) } } diff --git a/controlplane/kubeadm/internal/controllers/remediation.go b/controlplane/kubeadm/internal/controllers/remediation.go index 72a0eb8236aa..52404d973651 100644 --- a/controlplane/kubeadm/internal/controllers/remediation.go +++ b/controlplane/kubeadm/internal/controllers/remediation.go @@ -22,20 +22,19 @@ import ( "fmt" "time" - "github.com/blang/semver" + "github.com/blang/semver/v4" "github.com/go-logr/logr" "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" 
"sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" - "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" + "sigs.k8s.io/cluster-api/util/collections" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" ) @@ -49,14 +48,14 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C // Cleanup pending remediation actions not completed for any reasons (e.g. number of current replicas is less or equal to 1) // if the underlying machine is now back to healthy / not deleting. errList := []error{} - healthyMachines := controlPlane.HealthyMachines() + healthyMachines := controlPlane.HealthyMachinesByMachineHealthCheck() for _, m := range healthyMachines { if conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededCondition) && conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedCondition) && m.DeletionTimestamp.IsZero() { patchHelper, err := patch.NewHelper(m, r.Client) if err != nil { - errList = append(errList, errors.Wrapf(err, "failed to get PatchHelper for machine %s", m.Name)) + errList = append(errList, err) continue } @@ -65,7 +64,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C if err := patchHelper.Patch(ctx, m, patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ clusterv1.MachineOwnerRemediatedCondition, }}); err != nil { - errList = append(errList, errors.Wrapf(err, "failed to patch machine %s", m.Name)) + errList = append(errList, err) } } } @@ -75,19 +74,20 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C // Gets all machines that have `MachineHealthCheckSucceeded=False` (indicating a problem was detected on the machine) // and `MachineOwnerRemediated` present, indicating that this controller is responsible for performing remediation. - unhealthyMachines := controlPlane.UnhealthyMachines() + unhealthyMachines := controlPlane.UnhealthyMachinesByMachineHealthCheck() // If there are no unhealthy machines, return so KCP can proceed with other operations (ctrl.Result nil). if len(unhealthyMachines) == 0 { return ctrl.Result{}, nil } - // Select the machine to be remediated, which is the oldest machine marked as unhealthy. + // Select the machine to be remediated, which is the oldest machine marked as unhealthy not yet provisioned (if any) + // or the oldest machine marked as unhealthy. // // NOTE: The current solution is considered acceptable for the most frequent use case (only one unhealthy machine), // however, in the future this could potentially be improved for the scenario where more than one unhealthy machine exists // by considering which machine has lower impact on etcd quorum. - machineToBeRemediated := unhealthyMachines.Oldest() + machineToBeRemediated := getMachineToBeRemediated(unhealthyMachines) // Returns if the machine is in the process of being deleted. if !machineToBeRemediated.ObjectMeta.DeletionTimestamp.IsZero() { @@ -99,9 +99,29 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C // Returns if another remediation is in progress but the new Machine is not yet created. // Note: This condition is checked after we check for unhealthy Machines and if machineToBeRemediated // is being deleted to avoid unnecessary logs if no further remediation should be done. - if _, ok := controlPlane.KCP.Annotations[controlplanev1.RemediationInProgressAnnotation]; ok { - log.Info("Another remediation is already in progress. 
Skipping remediation.") - return ctrl.Result{}, nil + if v, ok := controlPlane.KCP.Annotations[controlplanev1.RemediationInProgressAnnotation]; ok { + // Check if the annotation is stale; this might happen in case there is a crash in the controller in between + // when a new Machine is created and the annotation is eventually removed from KCP via defer patch at the end + // of KCP reconcile. + remediationData, err := RemediationDataFromAnnotation(v) + if err != nil { + return ctrl.Result{}, err + } + + staleAnnotation := false + for _, m := range controlPlane.Machines.UnsortedList() { + if m.CreationTimestamp.After(remediationData.Timestamp.Time) { + // Remove the annotation tracking that a remediation is in progress (the annotation is stale). + delete(controlPlane.KCP.Annotations, controlplanev1.RemediationInProgressAnnotation) + staleAnnotation = true + break + } + } + + if !staleAnnotation { + log.Info("Another remediation is already in progress. Skipping remediation.") + return ctrl.Result{}, nil + } } patchHelper, err := patch.NewHelper(machineToBeRemediated, r.Client) @@ -143,11 +163,18 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C // The cluster MUST have more than one replica, because this is the smallest cluster size that allows any etcd failure tolerance. if controlPlane.Machines.Len() <= 1 { - log.Info("A control plane machine needs remediation, but the number of current replicas is less or equal to 1. Skipping remediation", "Replicas", controlPlane.Machines.Len()) + log.Info("A control plane machine needs remediation, but the number of current replicas is less or equal to 1. Skipping remediation", "replicas", controlPlane.Machines.Len()) conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP can't remediate if current replicas are less or equal to 1") return ctrl.Result{}, nil } + // The cluster MUST NOT have healthy machines still being provisioned. This rule prevents KCP taking actions while the cluster is in a transitional state. + if controlPlane.HasHealthyMachineStillProvisioning() { + log.Info("A control plane machine needs remediation, but there are other control-plane machines being provisioned. Skipping remediation") + conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP waiting for control plane machine provisioning to complete before triggering remediation") + return ctrl.Result{}, nil + } + // The cluster MUST have no machines with a deletion timestamp. This rule prevents KCP taking actions while the cluster is in a transitional state. if controlPlane.HasDeletingMachine() { log.Info("A control plane machine needs remediation, but there are other control-plane machines being deleted. Skipping remediation") @@ -177,7 +204,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C // - if the machine hosts the etcd leader, forward etcd leadership to another machine. // - delete the etcd member hosted on the machine being deleted. 
// - remove the etcd member from the kubeadm config map (only for kubernetes version older than v1.22.0) - workloadCluster, err := r.managementCluster.GetWorkloadCluster(ctx, util.ObjectKey(controlPlane.Cluster)) + workloadCluster, err := controlPlane.GetWorkloadCluster(ctx) if err != nil { log.Error(err, "Failed to create client to workload cluster") return ctrl.Result{}, errors.Wrapf(err, "failed to create client to workload cluster") @@ -185,7 +212,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C // If the machine that is about to be deleted is the etcd leader, move it to the newest member available. if controlPlane.IsEtcdManaged() { - etcdLeaderCandidate := controlPlane.HealthyMachines().Newest() + etcdLeaderCandidate := controlPlane.HealthyMachinesByMachineHealthCheck().Newest() if etcdLeaderCandidate == nil { log.Info("A control plane machine needs remediation, but there is no healthy machine to forward etcd leadership to") conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationFailedReason, clusterv1.ConditionSeverityWarning, @@ -240,6 +267,16 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C return ctrl.Result{Requeue: true}, nil } +// Gets the machine to be remediated, which is the oldest machine marked as unhealthy not yet provisioned (if any) +// or the oldest machine marked as unhealthy. +func getMachineToBeRemediated(unhealthyMachines collections.Machines) *clusterv1.Machine { + machineToBeRemediated := unhealthyMachines.Filter(collections.Not(collections.HasNode())).Oldest() + if machineToBeRemediated == nil { + machineToBeRemediated = unhealthyMachines.Oldest() + } + return machineToBeRemediated +} + // checkRetryLimits checks if KCP is allowed to remediate considering retry limits: // - Remediation cannot happen because retryPeriod is not yet expired. // - KCP already reached the maximum number of retries for a machine. @@ -295,7 +332,7 @@ func (r *KubeadmControlPlaneReconciler) checkRetryLimits(log logr.Logger, machin var retryForSameMachineInProgress bool if lastRemediationTime.Add(minHealthyPeriod).After(reconciliationTime) { retryForSameMachineInProgress = true - log = log.WithValues("RemediationRetryFor", klog.KRef(machineToBeRemediated.Namespace, lastRemediationData.Machine)) + log = log.WithValues("remediationRetryFor", klog.KRef(machineToBeRemediated.Namespace, lastRemediationData.Machine)) } // If the retry for the same machine is not in progress, this is the first try of a new retry sequence. @@ -329,14 +366,6 @@ func (r *KubeadmControlPlaneReconciler) checkRetryLimits(log logr.Logger, machin return remediationInProgressData, true, nil } -// max calculates the maximum duration. -func max(x, y time.Duration) time.Duration { - if x < y { - return y - } - return x -} - // canSafelyRemoveEtcdMember assesses if it is possible to remove the member hosted on the machine to be remediated // without losing etcd quorum.
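The quorum rule behind this check, as a standalone sketch (a simplification under stated assumptions, not the actual implementation): etcd requires a strict majority of its members, so removing the unhealthy member is safe only if the healthy members that remain still reach the quorum of the shrunken membership.

// canSafelyRemoveSketch models the invariant: with n members the quorum is
// n/2+1; after dropping one member the healthy count must still meet the
// quorum of the resulting membership. The member being removed is assumed
// unhealthy, so the healthy count does not change.
func canSafelyRemoveSketch(totalMembers, healthyMembers int) bool {
	targetTotal := totalMembers - 1   // membership after the removal
	targetQuorum := targetTotal/2 + 1 // etcd majority rule
	return healthyMembers >= targetQuorum
}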
// @@ -355,10 +384,7 @@ func max(x, y time.Duration) time.Duration { func (r *KubeadmControlPlaneReconciler) canSafelyRemoveEtcdMember(ctx context.Context, controlPlane *internal.ControlPlane, machineToBeRemediated *clusterv1.Machine) (bool, error) { log := ctrl.LoggerFrom(ctx) - workloadCluster, err := r.managementCluster.GetWorkloadCluster(ctx, client.ObjectKey{ - Namespace: controlPlane.Cluster.Namespace, - Name: controlPlane.Cluster.Name, - }) + workloadCluster, err := controlPlane.GetWorkloadCluster(ctx) if err != nil { return false, errors.Wrapf(err, "failed to get client for workload cluster %s", controlPlane.Cluster.Name) } @@ -406,7 +432,7 @@ func (r *KubeadmControlPlaneReconciler) canSafelyRemoveEtcdMember(ctx context.Co // // NOTE: This should not happen given that KCP is running reconcileEtcdMembers before calling this method. if machine == nil { - log.Info("An etcd member does not have a corresponding machine, assuming this member is unhealthy", "MemberName", etcdMember) + log.Info("An etcd member does not have a corresponding machine, assuming this member is unhealthy", "memberName", etcdMember) targetUnhealthyMembers++ unhealthyMembers = append(unhealthyMembers, fmt.Sprintf("%s (no machine)", etcdMember)) continue diff --git a/controlplane/kubeadm/internal/controllers/remediation_test.go b/controlplane/kubeadm/internal/controllers/remediation_test.go index 26183baa1cc7..ab4abc7bd220 100644 --- a/controlplane/kubeadm/internal/controllers/remediation_test.go +++ b/controlplane/kubeadm/internal/controllers/remediation_test.go @@ -29,7 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/record" - utilpointer "k8s.io/utils/pointer" + utilptr "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -40,6 +40,43 @@ import ( "sigs.k8s.io/cluster-api/util/patch" ) +func TestGetMachineToBeRemediated(t *testing.T) { + t.Run("returns the oldest machine if there are no provisioning machines", func(t *testing.T) { + g := NewWithT(t) + + ns, err := env.CreateNamespace(ctx, "ns1") + g.Expect(err).ToNot(HaveOccurred()) + defer func() { + g.Expect(env.Cleanup(ctx, ns)).To(Succeed()) + }() + + m1 := createMachine(ctx, g, ns.Name, "m1-unhealthy-", withMachineHealthCheckFailed()) + m2 := createMachine(ctx, g, ns.Name, "m2-unhealthy-", withMachineHealthCheckFailed()) + + unhealthyMachines := collections.FromMachines(m1, m2) + + g.Expect(getMachineToBeRemediated(unhealthyMachines).Name).To(HavePrefix("m1-unhealthy-")) + }) + + t.Run("returns the oldest of the provisioning machines", func(t *testing.T) { + g := NewWithT(t) + + ns, err := env.CreateNamespace(ctx, "ns1") + g.Expect(err).ToNot(HaveOccurred()) + defer func() { + g.Expect(env.Cleanup(ctx, ns)).To(Succeed()) + }() + + m1 := createMachine(ctx, g, ns.Name, "m1-unhealthy-", withMachineHealthCheckFailed()) + m2 := createMachine(ctx, g, ns.Name, "m2-unhealthy-", withMachineHealthCheckFailed(), withoutNodeRef()) + m3 := createMachine(ctx, g, ns.Name, "m3-unhealthy-", withMachineHealthCheckFailed(), withoutNodeRef()) + + unhealthyMachines := collections.FromMachines(m1, m2, m3) + + g.Expect(getMachineToBeRemediated(unhealthyMachines).Name).To(HavePrefix("m2-unhealthy-")) + }) +} + func TestReconcileUnhealthyMachines(t *testing.T) { g := NewWithT(t) @@ -57,7 +94,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { patchHelper, err := patch.NewHelper(m, env.GetClient()) g.Expect(err).ToNot(HaveOccurred()) 
m.ObjectMeta.Finalizers = nil - g.Expect(patchHelper.Patch(ctx, m)) + g.Expect(patchHelper.Patch(ctx, m)).To(Succeed()) } t.Run("It cleans up stuck remediation on previously unhealthy machines", func(t *testing.T) { @@ -129,6 +166,47 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(ret.IsZero()).To(BeTrue()) // Remediation skipped g.Expect(err).ToNot(HaveOccurred()) }) + t.Run("remediation in progress is ignored when stale", func(t *testing.T) { + g := NewWithT(t) + + m := createMachine(ctx, g, ns.Name, "m1-unhealthy-", withStuckRemediation(), withWaitBeforeDeleteFinalizer()) + conditions.MarkFalse(m, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "") + conditions.MarkFalse(m, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "") + controlPlane := &internal.ControlPlane{ + KCP: &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + controlplanev1.RemediationInProgressAnnotation: MustMarshalRemediationData(&RemediationData{ + Machine: "foo", + Timestamp: metav1.Time{Time: time.Now().Add(-1 * time.Hour).UTC()}, + RetryCount: 0, + }), + }, + }, + }, + Cluster: &clusterv1.Cluster{}, + Machines: collections.FromMachines(m), + } + ret, err := r.reconcileUnhealthyMachines(ctx, controlPlane) + + g.Expect(ret.IsZero()).To(BeFalse()) // Remediation completed, requeue + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(controlPlane.KCP.Annotations).To(HaveKey(controlplanev1.RemediationInProgressAnnotation)) + remediationData, err := RemediationDataFromAnnotation(controlPlane.KCP.Annotations[controlplanev1.RemediationInProgressAnnotation]) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(remediationData.Machine).To(Equal(m.Name)) + g.Expect(remediationData.RetryCount).To(Equal(0)) + + assertMachineCondition(ctx, g, m, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") + + err = env.Get(ctx, client.ObjectKey{Namespace: m.Namespace, Name: m.Name}, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(m.ObjectMeta.DeletionTimestamp.IsZero()).To(BeFalse()) + + removeFinalizer(g, m) + g.Expect(env.Cleanup(ctx, m)).To(Succeed()) + }) t.Run("reconcileUnhealthyMachines return early if the machine to be remediated is already being deleted", func(t *testing.T) { g := NewWithT(t) @@ -161,10 +239,10 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), Version: "v1.19.1", RemediationStrategy: &controlplanev1.RemediationStrategy{ - MaxRetry: utilpointer.Int32(3), + MaxRetry: utilptr.To[int32](3), }, }, }, @@ -212,10 +290,10 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), Version: "v1.19.1", RemediationStrategy: &controlplanev1.RemediationStrategy{ - MaxRetry: utilpointer.Int32(3), + MaxRetry: utilptr.To[int32](3), }, }, }, @@ -269,10 +347,10 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - 
Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), Version: "v1.19.1", RemediationStrategy: &controlplanev1.RemediationStrategy{ - MaxRetry: utilpointer.Int32(3), + MaxRetry: utilptr.To[int32](3), MinHealthyPeriod: &metav1.Duration{Duration: minHealthyPeriod}, }, }, @@ -325,10 +403,10 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), Version: "v1.19.1", RemediationStrategy: &controlplanev1.RemediationStrategy{ - MaxRetry: utilpointer.Int32(3), + MaxRetry: utilptr.To[int32](3), RetryPeriod: metav1.Duration{Duration: controlplanev1.DefaultMinHealthyPeriod}, // RetryPeriod not yet expired. }, }, @@ -376,7 +454,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(1), + Replicas: utilptr.To[int32](1), RolloutStrategy: &controlplanev1.RolloutStrategy{ RollingUpdate: &controlplanev1.RollingUpdate{ MaxSurge: &intstr.IntOrString{ @@ -412,7 +490,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), }, Status: controlplanev1.KubeadmControlPlaneStatus{ Initialized: true, @@ -432,6 +510,65 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(env.Cleanup(ctx, m1, m2)).To(Succeed()) }) + t.Run("Remediation does not happen if there is a healthy machine being provisioned", func(t *testing.T) { + g := NewWithT(t) + + m1 := createMachine(ctx, g, ns.Name, "m1-unhealthy-", withMachineHealthCheckFailed()) + m2 := createMachine(ctx, g, ns.Name, "m2-healthy-") + m3 := createMachine(ctx, g, ns.Name, "m3-healthy-", withoutNodeRef()) // Provisioning + controlPlane := &internal.ControlPlane{ + KCP: &controlplanev1.KubeadmControlPlane{ + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Replicas: utilptr.To(int32(3)), + }, + Status: controlplanev1.KubeadmControlPlaneStatus{ + Initialized: true, + }, + }, + Cluster: &clusterv1.Cluster{}, + Machines: collections.FromMachines(m1, m2, m3), + } + ret, err := r.reconcileUnhealthyMachines(ctx, controlPlane) + + g.Expect(ret.IsZero()).To(BeTrue()) // Remediation skipped + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) + + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP waiting for control plane machine provisioning to complete before triggering remediation") + + g.Expect(env.Cleanup(ctx, m1, m2, m3)).To(Succeed()) + }) + t.Run("Remediation does not happen if there is a healthy machine being provisioned - 4 CP (during 3 CP rolling upgrade)", func(t *testing.T) { + g := NewWithT(t) + + m1 := createMachine(ctx, g, ns.Name, "m1-unhealthy-", withMachineHealthCheckFailed()) + m2 := createMachine(ctx, g, ns.Name, "m2-healthy-") + m3 := createMachine(ctx, g, ns.Name, "m3-healthy-") + m4 := createMachine(ctx, g, ns.Name, "m4-healthy-", withoutNodeRef()) // Provisioning + controlPlane := &internal.ControlPlane{ + KCP: &controlplanev1.KubeadmControlPlane{ + Spec: 
controlplanev1.KubeadmControlPlaneSpec{ + Replicas: utilptr.To(int32(3)), + }, + Status: controlplanev1.KubeadmControlPlaneStatus{ + Initialized: true, + }, + }, + Cluster: &clusterv1.Cluster{}, + Machines: collections.FromMachines(m1, m2, m3, m4), + } + ret, err := r.reconcileUnhealthyMachines(ctx, controlPlane) + + g.Expect(ret.IsZero()).To(BeTrue()) // Remediation skipped + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) + + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP waiting for control plane machine provisioning to complete before triggering remediation") + + g.Expect(env.Cleanup(ctx, m1, m2, m3, m4)).To(Succeed()) + }) t.Run("Remediation does not happen if there is at least one additional unhealthy etcd member on a 3 machine CP", func(t *testing.T) { g := NewWithT(t) @@ -442,7 +579,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), }, Status: controlplanev1.KubeadmControlPlaneStatus{ Initialized: true, @@ -461,6 +598,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { }, }, } + controlPlane.InjectTestManagementCluster(r.managementCluster) ret, err := r.reconcileUnhealthyMachines(ctx, controlPlane) @@ -485,7 +623,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(5), + Replicas: utilptr.To[int32](5), }, Status: controlplanev1.KubeadmControlPlaneStatus{ Initialized: true, @@ -504,6 +642,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { }, }, } + controlPlane.InjectTestManagementCluster(r.managementCluster) ret, err := r.reconcileUnhealthyMachines(ctx, controlPlane) @@ -527,7 +666,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(1), + Replicas: utilptr.To[int32](1), Version: "v1.19.1", }, Status: controlplanev1.KubeadmControlPlaneStatus{ @@ -576,7 +715,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(1), + Replicas: utilptr.To[int32](1), Version: "v1.19.1", }, Status: controlplanev1.KubeadmControlPlaneStatus{ @@ -666,7 +805,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(2), + Replicas: utilptr.To[int32](2), Version: "v1.19.1", }, Status: controlplanev1.KubeadmControlPlaneStatus{ @@ -686,6 +825,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { }, }, } + controlPlane.InjectTestManagementCluster(r.managementCluster) ret, err := r.reconcileUnhealthyMachines(ctx, controlPlane) @@ -717,7 +857,59 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: 
utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), + Version: "v1.19.1", + }, + Status: controlplanev1.KubeadmControlPlaneStatus{ + Initialized: true, + }, + }, + Cluster: &clusterv1.Cluster{}, + Machines: collections.FromMachines(m1, m2, m3), + } + + r := &KubeadmControlPlaneReconciler{ + Client: env.GetClient(), + recorder: record.NewFakeRecorder(32), + managementCluster: &fakeManagementCluster{ + Workload: fakeWorkloadCluster{ + EtcdMembersResult: nodes(controlPlane.Machines), + }, + }, + } + controlPlane.InjectTestManagementCluster(r.managementCluster) + + ret, err := r.reconcileUnhealthyMachines(ctx, controlPlane) + + g.Expect(ret.IsZero()).To(BeFalse()) // Remediation completed, requeue + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(controlPlane.KCP.Annotations).To(HaveKey(controlplanev1.RemediationInProgressAnnotation)) + remediationData, err := RemediationDataFromAnnotation(controlPlane.KCP.Annotations[controlplanev1.RemediationInProgressAnnotation]) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(remediationData.Machine).To(Equal(m1.Name)) + g.Expect(remediationData.RetryCount).To(Equal(0)) + + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") + + err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(m1.ObjectMeta.DeletionTimestamp.IsZero()).To(BeFalse()) + + removeFinalizer(g, m1) + g.Expect(env.Cleanup(ctx, m1, m2, m3)).To(Succeed()) + }) + t.Run("Remediation deletes unhealthy machine failed to provision - 3 CP", func(t *testing.T) { + g := NewWithT(t) + + m1 := createMachine(ctx, g, ns.Name, "m1-unhealthy-", withMachineHealthCheckFailed(), withWaitBeforeDeleteFinalizer(), withoutNodeRef()) + m2 := createMachine(ctx, g, ns.Name, "m2-healthy-", withHealthyEtcdMember()) + m3 := createMachine(ctx, g, ns.Name, "m3-healthy-", withHealthyEtcdMember()) + + controlPlane := &internal.ControlPlane{ + KCP: &controlplanev1.KubeadmControlPlane{ + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Replicas: utilptr.To(int32(3)), Version: "v1.19.1", }, Status: controlplanev1.KubeadmControlPlaneStatus{ @@ -737,6 +929,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { }, }, } + controlPlane.InjectTestManagementCluster(r.managementCluster) ret, err := r.reconcileUnhealthyMachines(ctx, controlPlane) @@ -769,7 +962,60 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(4), + Replicas: utilptr.To[int32](4), + Version: "v1.19.1", + }, + Status: controlplanev1.KubeadmControlPlaneStatus{ + Initialized: true, + }, + }, + Cluster: &clusterv1.Cluster{}, + Machines: collections.FromMachines(m1, m2, m3, m4), + } + + r := &KubeadmControlPlaneReconciler{ + Client: env.GetClient(), + recorder: record.NewFakeRecorder(32), + managementCluster: &fakeManagementCluster{ + Workload: fakeWorkloadCluster{ + EtcdMembersResult: nodes(controlPlane.Machines), + }, + }, + } + controlPlane.InjectTestManagementCluster(r.managementCluster) + + ret, err := r.reconcileUnhealthyMachines(ctx, controlPlane) + + g.Expect(ret.IsZero()).To(BeFalse()) // Remediation completed, requeue + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(controlPlane.KCP.Annotations).To(HaveKey(controlplanev1.RemediationInProgressAnnotation)) + remediationData, err := 
RemediationDataFromAnnotation(controlPlane.KCP.Annotations[controlplanev1.RemediationInProgressAnnotation]) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(remediationData.Machine).To(Equal(m1.Name)) + g.Expect(remediationData.RetryCount).To(Equal(0)) + + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") + + err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(m1.ObjectMeta.DeletionTimestamp.IsZero()).To(BeFalse()) + + removeFinalizer(g, m1) + g.Expect(env.Cleanup(ctx, m1, m2, m3, m4)).To(Succeed()) + }) + t.Run("Remediation deletes unhealthy machine failed to provision - 4 CP (during 3 CP rolling upgrade)", func(t *testing.T) { + g := NewWithT(t) + + m1 := createMachine(ctx, g, ns.Name, "m1-unhealthy-", withMachineHealthCheckFailed(), withWaitBeforeDeleteFinalizer(), withoutNodeRef()) + m2 := createMachine(ctx, g, ns.Name, "m2-healthy-", withHealthyEtcdMember()) + m3 := createMachine(ctx, g, ns.Name, "m3-healthy-", withHealthyEtcdMember()) + m4 := createMachine(ctx, g, ns.Name, "m4-healthy-", withHealthyEtcdMember()) + + controlPlane := &internal.ControlPlane{ + KCP: &controlplanev1.KubeadmControlPlane{ + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Replicas: utilptr.To(int32(4)), Version: "v1.19.1", }, Status: controlplanev1.KubeadmControlPlaneStatus{ @@ -789,6 +1035,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { }, }, } + controlPlane.InjectTestManagementCluster(r.managementCluster) ret, err := r.reconcileUnhealthyMachines(ctx, controlPlane) @@ -821,7 +1068,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(4), + Replicas: utilptr.To[int32](4), Version: "v1.19.1", }, Status: controlplanev1.KubeadmControlPlaneStatus{ @@ -841,6 +1088,8 @@ func TestReconcileUnhealthyMachines(t *testing.T) { }, }, } + controlPlane.InjectTestManagementCluster(r.managementCluster) + _, err = r.reconcileUnhealthyMachines(ctx, controlPlane) g.Expect(err).ToNot(HaveOccurred()) @@ -862,7 +1111,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(1), + Replicas: utilptr.To[int32](1), Version: "v1.19.1", }, Status: controlplanev1.KubeadmControlPlaneStatus{ @@ -949,7 +1198,7 @@ func TestReconcileUnhealthyMachinesSequences(t *testing.T) { patchHelper, err := patch.NewHelper(m, env.GetClient()) g.Expect(err).ToNot(HaveOccurred()) m.ObjectMeta.Finalizers = nil - g.Expect(patchHelper.Patch(ctx, m)) + g.Expect(patchHelper.Patch(ctx, m)).To(Succeed()) } t.Run("Remediates the first CP machine having problems to come up", func(t *testing.T) { @@ -968,7 +1217,7 @@ func TestReconcileUnhealthyMachinesSequences(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), Version: "v1.19.1", }, Status: controlplanev1.KubeadmControlPlaneStatus{ @@ -1070,7 +1319,7 @@ func TestReconcileUnhealthyMachinesSequences(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: 
controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), Version: "v1.19.1", RolloutStrategy: &controlplanev1.RolloutStrategy{ RollingUpdate: &controlplanev1.RollingUpdate{ @@ -1097,6 +1346,7 @@ func TestReconcileUnhealthyMachinesSequences(t *testing.T) { }, }, } + controlPlane.InjectTestManagementCluster(r.managementCluster) ret, err := r.reconcileUnhealthyMachines(ctx, controlPlane) @@ -1132,6 +1382,7 @@ func TestReconcileUnhealthyMachinesSequences(t *testing.T) { EtcdMembersResult: nodes(controlPlane.Machines), }, } + controlPlane.InjectTestManagementCluster(r.managementCluster) ret, err = r.reconcileUnhealthyMachines(ctx, controlPlane) @@ -1180,7 +1431,7 @@ func TestReconcileUnhealthyMachinesSequences(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), Version: "v1.19.1", RolloutStrategy: &controlplanev1.RolloutStrategy{ RollingUpdate: &controlplanev1.RollingUpdate{ @@ -1207,6 +1458,7 @@ func TestReconcileUnhealthyMachinesSequences(t *testing.T) { }, }, } + controlPlane.InjectTestManagementCluster(r.managementCluster) ret, err := r.reconcileUnhealthyMachines(ctx, controlPlane) @@ -1263,7 +1515,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(1), + Replicas: utilptr.To[int32](1), }}, Cluster: &clusterv1.Cluster{}, Machines: collections.FromMachines(m1), @@ -1278,6 +1530,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { }, }, } + controlPlane.InjectTestManagementCluster(r.managementCluster) ret, err := r.canSafelyRemoveEtcdMember(ctx, controlPlane, m1) g.Expect(ret).To(BeFalse()) @@ -1294,7 +1547,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), }}, Cluster: &clusterv1.Cluster{}, Machines: collections.FromMachines(m1, m2), @@ -1309,6 +1562,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { }, }, } + controlPlane.InjectTestManagementCluster(r.managementCluster) ret, err := r.canSafelyRemoveEtcdMember(ctx, controlPlane, m1) g.Expect(ret).To(BeTrue()) @@ -1324,7 +1578,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), }}, Cluster: &clusterv1.Cluster{}, Machines: collections.FromMachines(m1, m2), @@ -1346,6 +1600,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { }, }, } + controlPlane.InjectTestManagementCluster(r.managementCluster) ret, err := r.canSafelyRemoveEtcdMember(ctx, controlPlane, m1) g.Expect(ret).To(BeTrue()) @@ -1361,7 +1616,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), }}, Cluster: &clusterv1.Cluster{}, Machines: collections.FromMachines(m1, m2), @@ -1376,6 +1631,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { }, }, } + controlPlane.InjectTestManagementCluster(r.managementCluster) ret, err 
:= r.canSafelyRemoveEtcdMember(ctx, controlPlane, m1) g.Expect(ret).To(BeFalse()) @@ -1392,7 +1648,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), }}, Cluster: &clusterv1.Cluster{}, Machines: collections.FromMachines(m1, m2, m3), @@ -1407,6 +1663,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { }, }, } + controlPlane.InjectTestManagementCluster(r.managementCluster) ret, err := r.canSafelyRemoveEtcdMember(ctx, controlPlane, m1) g.Expect(ret).To(BeTrue()) @@ -1423,7 +1680,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), }}, Cluster: &clusterv1.Cluster{}, Machines: collections.FromMachines(m1, m2, m3), @@ -1445,6 +1702,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { }, }, } + controlPlane.InjectTestManagementCluster(r.managementCluster) ret, err := r.canSafelyRemoveEtcdMember(ctx, controlPlane, m1) g.Expect(ret).To(BeTrue()) @@ -1461,7 +1719,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), }}, Cluster: &clusterv1.Cluster{}, Machines: collections.FromMachines(m1, m2, m3), @@ -1476,6 +1734,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { }, }, } + controlPlane.InjectTestManagementCluster(r.managementCluster) ret, err := r.canSafelyRemoveEtcdMember(ctx, controlPlane, m1) g.Expect(ret).To(BeFalse()) @@ -1494,7 +1753,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(5), + Replicas: utilptr.To[int32](5), }}, Cluster: &clusterv1.Cluster{}, Machines: collections.FromMachines(m1, m2, m3, m4, m5), @@ -1509,6 +1768,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { }, }, } + controlPlane.InjectTestManagementCluster(r.managementCluster) ret, err := r.canSafelyRemoveEtcdMember(ctx, controlPlane, m1) g.Expect(ret).To(BeTrue()) @@ -1527,7 +1787,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(7), + Replicas: utilptr.To[int32](7), }}, Cluster: &clusterv1.Cluster{}, Machines: collections.FromMachines(m1, m2, m3, m4, m5), @@ -1542,6 +1802,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { }, }, } + controlPlane.InjectTestManagementCluster(r.managementCluster) ret, err := r.canSafelyRemoveEtcdMember(ctx, controlPlane, m1) g.Expect(ret).To(BeFalse()) @@ -1562,7 +1823,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(7), + Replicas: utilptr.To[int32](7), }}, Cluster: &clusterv1.Cluster{}, Machines: collections.FromMachines(m1, m2, m3, m4, m5, m6, m7), @@ -1577,6 +1838,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { }, }, } + controlPlane.InjectTestManagementCluster(r.managementCluster) ret, err := 
r.canSafelyRemoveEtcdMember(ctx, controlPlane, m1) g.Expect(ret).To(BeTrue()) @@ -1597,7 +1859,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(5), + Replicas: utilptr.To[int32](5), }}, Cluster: &clusterv1.Cluster{}, Machines: collections.FromMachines(m1, m2, m3, m4, m5, m6, m7), @@ -1612,6 +1874,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { }, }, } + controlPlane.InjectTestManagementCluster(r.managementCluster) ret, err := r.canSafelyRemoveEtcdMember(ctx, controlPlane, m1) g.Expect(ret).To(BeFalse()) @@ -1659,6 +1922,12 @@ func withUnhealthyEtcdMember() machineOption { } } +func withUnhealthyAPIServerPod() machineOption { + return func(machine *clusterv1.Machine) { + conditions.MarkFalse(machine, controlplanev1.MachineAPIServerPodHealthyCondition, controlplanev1.ControlPlaneComponentsUnhealthyReason, clusterv1.ConditionSeverityError, "") + } +} + func withNodeRef(ref string) machineOption { return func(machine *clusterv1.Machine) { machine.Status.NodeRef = &corev1.ObjectReference{ @@ -1668,6 +1937,12 @@ func withNodeRef(ref string) machineOption { } } +func withoutNodeRef() machineOption { + return func(machine *clusterv1.Machine) { + machine.Status.NodeRef = nil + } +} + func withRemediateForAnnotation(remediatedFor string) machineOption { return func(machine *clusterv1.Machine) { if machine.Annotations == nil { @@ -1692,7 +1967,7 @@ func createMachine(ctx context.Context, g *WithT, namespace, name string, option Spec: clusterv1.MachineSpec{ ClusterName: "cluster", Bootstrap: clusterv1.Bootstrap{ - DataSecretName: utilpointer.String("secret"), + DataSecretName: utilptr.To("secret"), }, }, } @@ -1701,11 +1976,11 @@ func createMachine(ctx context.Context, g *WithT, namespace, name string, option patchHelper, err := patch.NewHelper(m, env.GetClient()) g.Expect(err).ToNot(HaveOccurred()) - for _, opt := range append(options, withNodeRef(fmt.Sprintf("node-%s", m.Name))) { + for _, opt := range append([]machineOption{withNodeRef(fmt.Sprintf("node-%s", m.Name))}, options...) { opt(m) } - g.Expect(patchHelper.Patch(ctx, m)) + g.Expect(patchHelper.Patch(ctx, m)).To(Succeed()) return m } @@ -1720,12 +1995,12 @@ func getDeletingMachine(namespace, name string, options ...machineOption) *clust Spec: clusterv1.MachineSpec{ ClusterName: "cluster", Bootstrap: clusterv1.Bootstrap{ - DataSecretName: utilpointer.String("secret"), + DataSecretName: utilptr.To("secret"), }, }, } - for _, opt := range append(options, withNodeRef(fmt.Sprintf("node-%s", m.Name))) { + for _, opt := range append([]machineOption{withNodeRef(fmt.Sprintf("node-%s", m.Name))}, options...) 
{ opt(m) } return m diff --git a/controlplane/kubeadm/internal/controllers/scale.go b/controlplane/kubeadm/internal/controllers/scale.go index e467dd64d5c6..3d43cdb5d9c8 100644 --- a/controlplane/kubeadm/internal/controllers/scale.go +++ b/controlplane/kubeadm/internal/controllers/scale.go @@ -20,7 +20,7 @@ import ( "context" "strings" - "github.com/blang/semver" + "github.com/blang/semver/v4" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -31,33 +31,22 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" - "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/collections" "sigs.k8s.io/cluster-api/util/conditions" ) -func (r *KubeadmControlPlaneReconciler) initializeControlPlane(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, controlPlane *internal.ControlPlane) (ctrl.Result, error) { +func (r *KubeadmControlPlaneReconciler) initializeControlPlane(ctx context.Context, controlPlane *internal.ControlPlane) (ctrl.Result, error) { logger := ctrl.LoggerFrom(ctx) - // Perform an uncached read of all the owned machines. This check is in place to make sure - // that the controller cache is not misbehaving and we end up initializing the cluster more than once. - ownedMachines, err := r.managementClusterUncached.GetMachinesForCluster(ctx, cluster, collections.OwnedMachines(kcp)) + bootstrapSpec := controlPlane.InitialControlPlaneConfig() + fd, err := controlPlane.NextFailureDomainForScaleUp(ctx) if err != nil { - logger.Error(err, "failed to perform an uncached read of control plane machines for cluster") return ctrl.Result{}, err } - if len(ownedMachines) > 0 { - return ctrl.Result{}, errors.Errorf( - "control plane has already been initialized, found %d owned machine for cluster %s/%s: controller cache or management cluster is misbehaving", - len(ownedMachines), cluster.Namespace, cluster.Name, - ) - } - bootstrapSpec := controlPlane.InitialControlPlaneConfig() - fd := controlPlane.NextFailureDomainForScaleUp() - if err := r.cloneConfigsAndGenerateMachine(ctx, cluster, kcp, bootstrapSpec, fd); err != nil { + if err := r.cloneConfigsAndGenerateMachine(ctx, controlPlane.Cluster, controlPlane.KCP, bootstrapSpec, fd); err != nil { logger.Error(err, "Failed to create initial control plane Machine") - r.recorder.Eventf(kcp, corev1.EventTypeWarning, "FailedInitialization", "Failed to create initial control plane Machine for cluster %s/%s control plane: %v", cluster.Namespace, cluster.Name, err) + r.recorder.Eventf(controlPlane.KCP, corev1.EventTypeWarning, "FailedInitialization", "Failed to create initial control plane Machine for cluster %s control plane: %v", klog.KObj(controlPlane.Cluster), err) return ctrl.Result{}, err } @@ -65,7 +54,7 @@ func (r *KubeadmControlPlaneReconciler) initializeControlPlane(ctx context.Conte return ctrl.Result{Requeue: true}, nil } -func (r *KubeadmControlPlaneReconciler) scaleUpControlPlane(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, controlPlane *internal.ControlPlane) (ctrl.Result, error) { +func (r *KubeadmControlPlaneReconciler) scaleUpControlPlane(ctx context.Context, controlPlane *internal.ControlPlane) (ctrl.Result, error) { logger := ctrl.LoggerFrom(ctx) // Run preflight checks to ensure that the control plane is stable before proceeding with a scale up/scale down operation; if not, 
wait. @@ -75,10 +64,14 @@ func (r *KubeadmControlPlaneReconciler) scaleUpControlPlane(ctx context.Context, // Create the bootstrap configuration bootstrapSpec := controlPlane.JoinControlPlaneConfig() - fd := controlPlane.NextFailureDomainForScaleUp() - if err := r.cloneConfigsAndGenerateMachine(ctx, cluster, kcp, bootstrapSpec, fd); err != nil { + fd, err := controlPlane.NextFailureDomainForScaleUp(ctx) + if err != nil { + return ctrl.Result{}, err + } + + if err := r.cloneConfigsAndGenerateMachine(ctx, controlPlane.Cluster, controlPlane.KCP, bootstrapSpec, fd); err != nil { logger.Error(err, "Failed to create additional control plane Machine") - r.recorder.Eventf(kcp, corev1.EventTypeWarning, "FailedScaleUp", "Failed to create additional control plane Machine for cluster %s/%s control plane: %v", cluster.Namespace, cluster.Name, err) + r.recorder.Eventf(controlPlane.KCP, corev1.EventTypeWarning, "FailedScaleUp", "Failed to create additional control plane Machine for cluster %s control plane: %v", klog.KObj(controlPlane.Cluster), err) return ctrl.Result{}, err } @@ -88,15 +81,13 @@ func (r *KubeadmControlPlaneReconciler) scaleUpControlPlane(ctx context.Context, func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane( ctx context.Context, - cluster *clusterv1.Cluster, - kcp *controlplanev1.KubeadmControlPlane, controlPlane *internal.ControlPlane, outdatedMachines collections.Machines, ) (ctrl.Result, error) { logger := ctrl.LoggerFrom(ctx) // Pick the Machine that we should scale down. - machineToDelete, err := selectMachineForScaleDown(controlPlane, outdatedMachines) + machineToDelete, err := selectMachineForScaleDown(ctx, controlPlane, outdatedMachines) if err != nil { return ctrl.Result{}, errors.Wrap(err, "failed to select machine for scale down") } @@ -107,7 +98,7 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane( return result, err } - workloadCluster, err := r.managementCluster.GetWorkloadCluster(ctx, util.ObjectKey(cluster)) + workloadCluster, err := controlPlane.GetWorkloadCluster(ctx) if err != nil { logger.Error(err, "Failed to create client to workload cluster") return ctrl.Result{}, errors.Wrapf(err, "failed to create client to workload cluster") } @@ -131,9 +122,9 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane( } } - parsedVersion, err := semver.ParseTolerant(kcp.Spec.Version) + parsedVersion, err := semver.ParseTolerant(controlPlane.KCP.Spec.Version) if err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to parse kubernetes version %q", kcp.Spec.Version) + return ctrl.Result{}, errors.Wrapf(err, "failed to parse kubernetes version %q", controlPlane.KCP.Spec.Version) } if err := workloadCluster.RemoveMachineFromKubeadmConfigMap(ctx, machineToDelete, parsedVersion); err != nil { @@ -144,8 +135,8 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane( logger = logger.WithValues("Machine", klog.KObj(machineToDelete)) if err := r.Client.Delete(ctx, machineToDelete); err != nil && !apierrors.IsNotFound(err) { logger.Error(err, "Failed to delete control plane machine") - r.recorder.Eventf(kcp, corev1.EventTypeWarning, "FailedScaleDown", - "Failed to delete control plane Machine %s for cluster %s/%s control plane: %v", machineToDelete.Name, cluster.Namespace, cluster.Name, err) + r.recorder.Eventf(controlPlane.KCP, corev1.EventTypeWarning, "FailedScaleDown", + "Failed to delete control plane Machine %s for cluster %s control plane: %v", machineToDelete.Name, klog.KObj(controlPlane.Cluster), err) return ctrl.Result{}, err } @@
-172,7 +163,7 @@ func (r *KubeadmControlPlaneReconciler) preflightChecks(ctx context.Context, con // If there are deleting machines, wait for the operation to complete. if controlPlane.HasDeletingMachine() { - logger.Info("Waiting for machines to be deleted", "Machines", strings.Join(controlPlane.Machines.Filter(collections.HasDeletionTimestamp).Names(), ", ")) + logger.Info("Waiting for machines to be deleted", "machines", strings.Join(controlPlane.Machines.Filter(collections.HasDeletionTimestamp).Names(), ", ")) return ctrl.Result{RequeueAfter: deleteRequeueAfter}, nil } @@ -200,9 +191,17 @@ loopmachines: } } - for _, condition := range allMachineHealthConditions { - if err := preflightCheckCondition("machine", machine, condition); err != nil { - machineErrors = append(machineErrors, err) + if machine.Status.NodeRef == nil { + // The conditions will only ever be set on a Machine if we're able to correlate a Machine to a Node. + // Correlating Machines to Nodes requires the nodeRef to be set. + // Instead of confusing users with errors about the conditions not being set, let's point them + // towards the unset nodeRef (which is the root cause of the conditions not being there). + machineErrors = append(machineErrors, errors.Errorf("Machine %s does not have a corresponding Node yet (Machine.status.nodeRef not set)", machine.Name)) + } else { + for _, condition := range allMachineHealthConditions { + if err := preflightCheckCondition("Machine", machine, condition); err != nil { + machineErrors = append(machineErrors, err) + } } } } @@ -232,15 +231,17 @@ func preflightCheckCondition(kind string, obj conditions.Getter, condition clust return nil } -func selectMachineForScaleDown(controlPlane *internal.ControlPlane, outdatedMachines collections.Machines) (*clusterv1.Machine, error) { +func selectMachineForScaleDown(ctx context.Context, controlPlane *internal.ControlPlane, outdatedMachines collections.Machines) (*clusterv1.Machine, error) { machines := controlPlane.Machines switch { case controlPlane.MachineWithDeleteAnnotation(outdatedMachines).Len() > 0: machines = controlPlane.MachineWithDeleteAnnotation(outdatedMachines) case controlPlane.MachineWithDeleteAnnotation(machines).Len() > 0: machines = controlPlane.MachineWithDeleteAnnotation(machines) + case controlPlane.UnhealthyMachinesWithUnhealthyControlPlaneComponents(outdatedMachines).Len() > 0: + machines = controlPlane.UnhealthyMachinesWithUnhealthyControlPlaneComponents(outdatedMachines) case outdatedMachines.Len() > 0: machines = outdatedMachines } - return controlPlane.MachineInFailureDomainWithMostMachines(machines) + return controlPlane.MachineInFailureDomainWithMostMachines(ctx, machines) } diff --git a/controlplane/kubeadm/internal/controllers/scale_test.go b/controlplane/kubeadm/internal/controllers/scale_test.go index 1cfdbc96f3db..d35db435bdfe 100644 --- a/controlplane/kubeadm/internal/controllers/scale_test.go +++ b/controlplane/kubeadm/internal/controllers/scale_test.go @@ -45,7 +45,7 @@ func TestKubeadmControlPlaneReconciler_initializeControlPlane(t *testing.T) { t.Log("Creating the namespace") ns, err := env.CreateNamespace(ctx, "test-kcp-reconciler-initializecontrolplane") - g.Expect(err).To(BeNil()) + g.Expect(err).ToNot(HaveOccurred()) return ns } @@ -78,28 +78,28 @@ func TestKubeadmControlPlaneReconciler_initializeControlPlane(t *testing.T) { KCP: kcp, } - result, err := r.initializeControlPlane(ctx, cluster, kcp, controlPlane) - g.Expect(result).To(Equal(ctrl.Result{Requeue: true})) -
g.Expect(err).NotTo(HaveOccurred()) + result, err := r.initializeControlPlane(ctx, controlPlane) + g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true})) + g.Expect(err).ToNot(HaveOccurred()) machineList := &clusterv1.MachineList{} g.Expect(env.GetAPIReader().List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed()) g.Expect(machineList.Items).To(HaveLen(1)) - res, err := collections.GetFilteredMachinesForCluster(ctx, env, cluster, collections.OwnedMachines(kcp)) + res, err := collections.GetFilteredMachinesForCluster(ctx, env.GetAPIReader(), cluster, collections.OwnedMachines(kcp)) g.Expect(res).To(HaveLen(1)) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(machineList.Items[0].Namespace).To(Equal(cluster.Namespace)) g.Expect(machineList.Items[0].Name).To(HavePrefix(kcp.Name)) g.Expect(machineList.Items[0].Spec.InfrastructureRef.Namespace).To(Equal(cluster.Namespace)) - g.Expect(machineList.Items[0].Spec.InfrastructureRef.Name).To(HavePrefix(genericInfrastructureMachineTemplate.GetName())) + g.Expect(machineList.Items[0].Spec.InfrastructureRef.Name).To(Equal(machineList.Items[0].Name)) g.Expect(machineList.Items[0].Spec.InfrastructureRef.APIVersion).To(Equal(genericInfrastructureMachineTemplate.GetAPIVersion())) g.Expect(machineList.Items[0].Spec.InfrastructureRef.Kind).To(Equal("GenericInfrastructureMachine")) g.Expect(machineList.Items[0].Spec.Bootstrap.ConfigRef.Namespace).To(Equal(cluster.Namespace)) - g.Expect(machineList.Items[0].Spec.Bootstrap.ConfigRef.Name).To(HavePrefix(kcp.Name)) + g.Expect(machineList.Items[0].Spec.Bootstrap.ConfigRef.Name).To(Equal(machineList.Items[0].Name)) g.Expect(machineList.Items[0].Spec.Bootstrap.ConfigRef.APIVersion).To(Equal(bootstrapv1.GroupVersion.String())) g.Expect(machineList.Items[0].Spec.Bootstrap.ConfigRef.Kind).To(Equal("KubeadmConfig")) } @@ -111,7 +111,7 @@ func TestKubeadmControlPlaneReconciler_scaleUpControlPlane(t *testing.T) { t.Log("Creating the namespace") ns, err := env.CreateNamespace(ctx, "test-kcp-reconciler-scaleupcontrolplane") - g.Expect(err).To(BeNil()) + g.Expect(err).ToNot(HaveOccurred()) return ns } @@ -155,8 +155,8 @@ func TestKubeadmControlPlaneReconciler_scaleUpControlPlane(t *testing.T) { Machines: fmc.Machines, } - result, err := r.scaleUpControlPlane(ctx, cluster, kcp, controlPlane) - g.Expect(result).To(Equal(ctrl.Result{Requeue: true})) + result, err := r.scaleUpControlPlane(ctx, controlPlane) + g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true})) g.Expect(err).ToNot(HaveOccurred()) controlPlaneMachines := clusterv1.MachineList{} @@ -172,7 +172,7 @@ func TestKubeadmControlPlaneReconciler_scaleUpControlPlane(t *testing.T) { t.Log("Creating the namespace") ns, err := env.CreateNamespace(ctx, "test-kcp-reconciler-scaleupcontrolplane") - g.Expect(err).To(BeNil()) + g.Expect(err).ToNot(HaveOccurred()) return ns } @@ -209,16 +209,19 @@ func TestKubeadmControlPlaneReconciler_scaleUpControlPlane(t *testing.T) { r := &KubeadmControlPlaneReconciler{ Client: env, - APIReader: env.GetAPIReader(), + SecretCachingClient: secretCachingClient, managementCluster: fmc, managementClusterUncached: fmc, recorder: record.NewFakeRecorder(32), - disableInPlacePropagation: true, } - result, err := r.reconcile(context.Background(), cluster, kcp) + controlPlane, adoptableMachineFound, err := r.initControlPlaneScope(ctx, cluster, kcp) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(adoptableMachineFound).To(BeFalse()) + + result, err := 
r.scaleUpControlPlane(context.Background(), controlPlane) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(result).To(Equal(ctrl.Result{RequeueAfter: preflightFailedRequeueAfter})) + g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: preflightFailedRequeueAfter})) // scaleUpControlPlane is never called due to health check failure and new machine is not created to scale up. controlPlaneMachines := &clusterv1.MachineList{} @@ -226,13 +229,13 @@ func TestKubeadmControlPlaneReconciler_scaleUpControlPlane(t *testing.T) { // No new machine should be created. // Note: expected length is 0 because no machine is created and hence no machine is on the API server. // Other machines are in-memory only during the test. - g.Expect(controlPlaneMachines.Items).To(HaveLen(0)) + g.Expect(controlPlaneMachines.Items).To(BeEmpty()) endMachines := collections.FromMachineList(controlPlaneMachines) for _, m := range endMachines { bm, ok := beforeMachines[m.Name] g.Expect(ok).To(BeTrue()) - g.Expect(m).To(Equal(bm)) + g.Expect(m).To(BeComparableTo(bm)) } }) } @@ -248,8 +251,9 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_NoError(t *testing. fakeClient := newFakeClient(machines["one"]) r := &KubeadmControlPlaneReconciler{ - recorder: record.NewFakeRecorder(32), - Client: fakeClient, + recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, managementCluster: &fakeManagementCluster{ Workload: fakeWorkloadCluster{}, }, @@ -267,10 +271,11 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_NoError(t *testing. Cluster: cluster, Machines: machines, } + controlPlane.InjectTestManagementCluster(r.managementCluster) - result, err := r.scaleDownControlPlane(context.Background(), cluster, kcp, controlPlane, controlPlane.Machines) + result, err := r.scaleDownControlPlane(context.Background(), controlPlane, controlPlane.Machines) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(result).To(Equal(ctrl.Result{Requeue: true})) + g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true})) controlPlaneMachines := clusterv1.MachineList{} g.Expect(fakeClient.List(context.Background(), &controlPlaneMachines)).To(Succeed()) @@ -289,8 +294,9 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_NoError(t *testing. fakeClient := newFakeClient(machines["one"], machines["two"], machines["three"]) r := &KubeadmControlPlaneReconciler{ - recorder: record.NewFakeRecorder(32), - Client: fakeClient, + recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, managementCluster: &fakeManagementCluster{ Workload: fakeWorkloadCluster{}, }, @@ -307,10 +313,11 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_NoError(t *testing. Cluster: cluster, Machines: machines, } + controlPlane.InjectTestManagementCluster(r.managementCluster) - result, err := r.scaleDownControlPlane(context.Background(), cluster, kcp, controlPlane, controlPlane.Machines) + result, err := r.scaleDownControlPlane(context.Background(), controlPlane, controlPlane.Machines) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(result).To(Equal(ctrl.Result{Requeue: true})) + g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true})) controlPlaneMachines := clusterv1.MachineList{} g.Expect(fakeClient.List(context.Background(), &controlPlaneMachines)).To(Succeed()) @@ -329,8 +336,9 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_NoError(t *testing. 
fakeClient := newFakeClient(machines["one"], machines["two"], machines["three"]) r := &KubeadmControlPlaneReconciler{ - recorder: record.NewFakeRecorder(32), - Client: fakeClient, + recorder: record.NewFakeRecorder(32), + Client: fakeClient, + SecretCachingClient: fakeClient, managementCluster: &fakeManagementCluster{ Workload: fakeWorkloadCluster{}, }, @@ -343,10 +351,11 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_NoError(t *testing. Cluster: cluster, Machines: machines, } + controlPlane.InjectTestManagementCluster(r.managementCluster) - result, err := r.scaleDownControlPlane(context.Background(), cluster, kcp, controlPlane, controlPlane.Machines) + result, err := r.scaleDownControlPlane(context.Background(), controlPlane, controlPlane.Machines) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(result).To(Equal(ctrl.Result{RequeueAfter: preflightFailedRequeueAfter})) + g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: preflightFailedRequeueAfter})) controlPlaneMachines := clusterv1.MachineList{} g.Expect(fakeClient.List(context.Background(), &controlPlaneMachines)).To(Succeed()) @@ -359,17 +368,26 @@ func TestSelectMachineForScaleDown(t *testing.T) { Spec: controlplanev1.KubeadmControlPlaneSpec{}, } startDate := time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC) - m1 := machine("machine-1", withFailureDomain("one"), withTimestamp(startDate.Add(time.Hour))) - m2 := machine("machine-2", withFailureDomain("one"), withTimestamp(startDate.Add(-3*time.Hour))) - m3 := machine("machine-3", withFailureDomain("one"), withTimestamp(startDate.Add(-4*time.Hour))) - m4 := machine("machine-4", withFailureDomain("two"), withTimestamp(startDate.Add(-time.Hour))) - m5 := machine("machine-5", withFailureDomain("two"), withTimestamp(startDate.Add(-2*time.Hour))) - m6 := machine("machine-6", withFailureDomain("two"), withTimestamp(startDate.Add(-7*time.Hour))) - m7 := machine("machine-7", withFailureDomain("two"), withTimestamp(startDate.Add(-5*time.Hour)), withAnnotation("cluster.x-k8s.io/delete-machine")) - m8 := machine("machine-8", withFailureDomain("two"), withTimestamp(startDate.Add(-6*time.Hour)), withAnnotation("cluster.x-k8s.io/delete-machine")) + m1 := machine("machine-1", withFailureDomain("one"), withTimestamp(startDate.Add(time.Hour)), machineOpt(withNodeRef("machine-1"))) + m2 := machine("machine-2", withFailureDomain("one"), withTimestamp(startDate.Add(-3*time.Hour)), machineOpt(withNodeRef("machine-2"))) + m3 := machine("machine-3", withFailureDomain("one"), withTimestamp(startDate.Add(-4*time.Hour)), machineOpt(withNodeRef("machine-3"))) + m4 := machine("machine-4", withFailureDomain("two"), withTimestamp(startDate.Add(-time.Hour)), machineOpt(withNodeRef("machine-4"))) + m5 := machine("machine-5", withFailureDomain("two"), withTimestamp(startDate.Add(-2*time.Hour)), machineOpt(withNodeRef("machine-5"))) + m6 := machine("machine-6", withFailureDomain("two"), withTimestamp(startDate.Add(-7*time.Hour)), machineOpt(withNodeRef("machine-6"))) + m7 := machine("machine-7", withFailureDomain("two"), withTimestamp(startDate.Add(-5*time.Hour)), + withAnnotation("cluster.x-k8s.io/delete-machine"), machineOpt(withNodeRef("machine-7"))) + m8 := machine("machine-8", withFailureDomain("two"), withTimestamp(startDate.Add(-6*time.Hour)), + withAnnotation("cluster.x-k8s.io/delete-machine"), machineOpt(withNodeRef("machine-8"))) + m9 := machine("machine-9", withFailureDomain("two"), withTimestamp(startDate.Add(-5*time.Hour)), + machineOpt(withNodeRef("machine-9"))) + m10 := 
machine("machine-10", withFailureDomain("two"), withTimestamp(startDate.Add(-4*time.Hour)), + machineOpt(withNodeRef("machine-10")), machineOpt(withUnhealthyAPIServerPod())) + m11 := machine("machine-11", withFailureDomain("two"), withTimestamp(startDate.Add(-3*time.Hour)), + machineOpt(withNodeRef("machine-11")), machineOpt(withUnhealthyEtcdMember())) mc3 := collections.FromMachines(m1, m2, m3, m4, m5) mc6 := collections.FromMachines(m6, m7, m8) + mc9 := collections.FromMachines(m9, m10, m11) fd := clusterv1.FailureDomains{ "one": failureDomain(true), "two": failureDomain(true), @@ -380,6 +398,11 @@ func TestSelectMachineForScaleDown(t *testing.T) { Cluster: &clusterv1.Cluster{Status: clusterv1.ClusterStatus{FailureDomains: fd}}, Machines: mc3, } + needsUpgradeControlPlane1 := &internal.ControlPlane{ + KCP: &kcp, + Cluster: &clusterv1.Cluster{Status: clusterv1.ClusterStatus{FailureDomains: fd}}, + Machines: mc9, + } upToDateControlPlane := &internal.ControlPlane{ KCP: &kcp, Cluster: &clusterv1.Cluster{Status: clusterv1.ClusterStatus{FailureDomains: fd}}, @@ -443,26 +466,40 @@ func TestSelectMachineForScaleDown(t *testing.T) { expectedMachine: clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-7"}}, }, { - name: "when there is an up to date machine with delete annotation, while there are any outdated machines without annotatio that still exist, it returns oldest marked machine first", + name: "when there is an up to date machine with delete annotation, while there are any outdated machines without annotation that still exist, it returns oldest marked machine first", cp: upToDateControlPlane, outDatedMachines: collections.FromMachines(m5, m3, m8, m7, m6, m1, m2), expectErr: false, expectedMachine: clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-8"}}, }, + { + name: "when there are machines needing upgrade, it returns the single unhealthy machine with MachineAPIServerPodHealthyCondition set to False", + cp: needsUpgradeControlPlane1, + outDatedMachines: collections.FromMachines(m9, m10), + expectErr: false, + expectedMachine: clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-10"}}, + }, + { + name: "when there are machines needing upgrade, it returns the oldest unhealthy machine with MachineEtcdMemberHealthyCondition set to False", + cp: needsUpgradeControlPlane1, + outDatedMachines: collections.FromMachines(m9, m10, m11), + expectErr: false, + expectedMachine: clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-10"}}, + }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - selectedMachine, err := selectMachineForScaleDown(tc.cp, tc.outDatedMachines) + selectedMachine, err := selectMachineForScaleDown(ctx, tc.cp, tc.outDatedMachines) if tc.expectErr { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(tc.expectedMachine.Name).To(Equal(selectedMachine.Name)) }) } @@ -492,12 +529,28 @@ func TestPreflightChecks(t *testing.T) { }, expectResult: ctrl.Result{RequeueAfter: deleteRequeueAfter}, }, + { + name: "control plane without a nodeRef should requeue", + kcp: &controlplanev1.KubeadmControlPlane{}, + machines: []*clusterv1.Machine{ + { + Status: clusterv1.MachineStatus{ + NodeRef: nil, + }, + }, + }, + expectResult: ctrl.Result{RequeueAfter: preflightFailedRequeueAfter}, + }, { name: "control plane with an unhealthy machine condition should requeue", kcp: &controlplanev1.KubeadmControlPlane{}, machines: []*clusterv1.Machine{ { 
Status: clusterv1.MachineStatus{ + NodeRef: &corev1.ObjectReference{ + Kind: "Node", + Name: "node-1", + }, Conditions: clusterv1.Conditions{ *conditions.FalseCondition(controlplanev1.MachineAPIServerPodHealthyCondition, "fooReason", clusterv1.ConditionSeverityError, ""), *conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), @@ -523,6 +576,10 @@ func TestPreflightChecks(t *testing.T) { machines: []*clusterv1.Machine{ { Status: clusterv1.MachineStatus{ + NodeRef: &corev1.ObjectReference{ + Kind: "Node", + Name: "node-1", + }, Conditions: clusterv1.Conditions{ *conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition), *conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), @@ -550,8 +607,8 @@ func TestPreflightChecks(t *testing.T) { Machines: collections.FromMachines(tt.machines...), } result, err := r.preflightChecks(context.TODO(), controlPlane) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(result).To(Equal(tt.expectResult)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(result).To(BeComparableTo(tt.expectResult)) }) } } @@ -613,7 +670,7 @@ func TestPreflightCheckCondition(t *testing.T) { g.Expect(err).To(HaveOccurred()) return } - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) }) } } diff --git a/controlplane/kubeadm/internal/controllers/status.go b/controlplane/kubeadm/internal/controllers/status.go index 3d742bdac7f4..7ebdbe6da49e 100644 --- a/controlplane/kubeadm/internal/controllers/status.go +++ b/controlplane/kubeadm/internal/controllers/status.go @@ -20,82 +20,72 @@ import ( "context" "github.com/pkg/errors" - ctrl "sigs.k8s.io/controller-runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" - "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/collections" "sigs.k8s.io/cluster-api/util/conditions" ) // updateStatus is called after every reconciliation loop in a defer statement to always make sure we have the // resource status subresources up-to-date. -func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster) error { - log := ctrl.LoggerFrom(ctx) - - selector := collections.ControlPlaneSelectorForCluster(cluster.Name) +func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, controlPlane *internal.ControlPlane) error { + selector := collections.ControlPlaneSelectorForCluster(controlPlane.Cluster.Name) // Copy label selector to its status counterpart in string format. // This is necessary for CRDs including scale subresources.
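(Editorial aside, not part of the patch: the reason `status.selector` must be kept as a string is the CRD scale subresource, which is how `kubectl scale` and the HorizontalPodAutoscaler discover which Machines a KubeadmControlPlane owns. A hedged sketch of that wiring, using the usual kubebuilder marker convention; the exact marker on the real API type may differ.)

```go
package sketch

// Illustrative only: selectorpath tells the scale subresource where to read
// the label selector that updateStatus keeps in sync below.
// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector
type KubeadmControlPlane struct {
	// spec.replicas, status.replicas and status.selector back the scale subresource.
}
```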
- kcp.Status.Selector = selector.String() + controlPlane.KCP.Status.Selector = selector.String() - ownedMachines, err := r.managementCluster.GetMachinesForCluster(ctx, cluster, collections.OwnedMachines(kcp)) + upToDateMachines, err := controlPlane.UpToDateMachines() if err != nil { - return errors.Wrap(err, "failed to get list of owned machines") - } - - controlPlane, err := internal.NewControlPlane(ctx, r.Client, cluster, kcp, ownedMachines) - if err != nil { - log.Error(err, "failed to initialize control plane") - return err + return errors.Wrapf(err, "failed to update status") } - kcp.Status.UpdatedReplicas = int32(len(controlPlane.UpToDateMachines())) + controlPlane.KCP.Status.UpdatedReplicas = int32(len(upToDateMachines)) - replicas := int32(len(ownedMachines)) - desiredReplicas := *kcp.Spec.Replicas + replicas := int32(len(controlPlane.Machines)) + desiredReplicas := *controlPlane.KCP.Spec.Replicas // set basic data that does not require interacting with the workload cluster - kcp.Status.Replicas = replicas - kcp.Status.ReadyReplicas = 0 - kcp.Status.UnavailableReplicas = replicas + controlPlane.KCP.Status.Replicas = replicas + controlPlane.KCP.Status.ReadyReplicas = 0 + controlPlane.KCP.Status.UnavailableReplicas = replicas // Return early if the deletion timestamp is set, because we don't want to try to connect to the workload cluster // and we don't want to report resize condition (because it is set to deleting into reconcile delete). - if !kcp.DeletionTimestamp.IsZero() { + if !controlPlane.KCP.DeletionTimestamp.IsZero() { return nil } - machinesWithHealthAPIServer := ownedMachines.Filter(collections.HealthyAPIServer()) - lowestVersion := machinesWithHealthAPIServer.LowestVersion() + machinesWithHealthyAPIServer := controlPlane.Machines.Filter(collections.HealthyAPIServer()) + lowestVersion := machinesWithHealthyAPIServer.LowestVersion() if lowestVersion != nil { - kcp.Status.Version = lowestVersion + controlPlane.KCP.Status.Version = lowestVersion } switch { // We are scaling up case replicas < desiredReplicas: - conditions.MarkFalse(kcp, controlplanev1.ResizedCondition, controlplanev1.ScalingUpReason, clusterv1.ConditionSeverityWarning, "Scaling up control plane to %d replicas (actual %d)", desiredReplicas, replicas) + conditions.MarkFalse(controlPlane.KCP, controlplanev1.ResizedCondition, controlplanev1.ScalingUpReason, clusterv1.ConditionSeverityWarning, "Scaling up control plane to %d replicas (actual %d)", desiredReplicas, replicas) // We are scaling down case replicas > desiredReplicas: - conditions.MarkFalse(kcp, controlplanev1.ResizedCondition, controlplanev1.ScalingDownReason, clusterv1.ConditionSeverityWarning, "Scaling down control plane to %d replicas (actual %d)", desiredReplicas, replicas) + conditions.MarkFalse(controlPlane.KCP, controlplanev1.ResizedCondition, controlplanev1.ScalingDownReason, clusterv1.ConditionSeverityWarning, "Scaling down control plane to %d replicas (actual %d)", desiredReplicas, replicas) // This means that there was no error in generating the desired number of machine objects - conditions.MarkTrue(kcp, controlplanev1.MachinesCreatedCondition) + conditions.MarkTrue(controlPlane.KCP, controlplanev1.MachinesCreatedCondition) default: // make sure last resize operation is marked as completed. // NOTE: we are checking the number of machines ready so we report resize completed only when the machines // are actually provisioned (vs reporting completed immediately after the last machine object is created). 
- readyMachines := ownedMachines.Filter(collections.IsReady()) + readyMachines := controlPlane.Machines.Filter(collections.IsReady()) if int32(len(readyMachines)) == replicas { - conditions.MarkTrue(kcp, controlplanev1.ResizedCondition) + conditions.MarkTrue(controlPlane.KCP, controlplanev1.ResizedCondition) } // This means that there was no error in generating the desired number of machine objects - conditions.MarkTrue(kcp, controlplanev1.MachinesCreatedCondition) + conditions.MarkTrue(controlPlane.KCP, controlplanev1.MachinesCreatedCondition) } - workloadCluster, err := r.managementCluster.GetWorkloadCluster(ctx, util.ObjectKey(cluster)) + workloadCluster, err := controlPlane.GetWorkloadCluster(ctx) if err != nil { return errors.Wrap(err, "failed to create remote cluster client") } @@ -103,17 +93,17 @@ func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, kcp *c if err != nil { return err } - kcp.Status.ReadyReplicas = status.ReadyNodes - kcp.Status.UnavailableReplicas = replicas - status.ReadyNodes + controlPlane.KCP.Status.ReadyReplicas = status.ReadyNodes + controlPlane.KCP.Status.UnavailableReplicas = replicas - status.ReadyNodes // This only gets initialized once and does not change if the kubeadm config map goes away. if status.HasKubeadmConfig { - kcp.Status.Initialized = true - conditions.MarkTrue(kcp, controlplanev1.AvailableCondition) + controlPlane.KCP.Status.Initialized = true + conditions.MarkTrue(controlPlane.KCP, controlplanev1.AvailableCondition) } - if kcp.Status.ReadyReplicas > 0 { - kcp.Status.Ready = true + if controlPlane.KCP.Status.ReadyReplicas > 0 { + controlPlane.KCP.Status.Ready = true } // Surface lastRemediation data in status. @@ -121,14 +111,14 @@ func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, kcp *c // most recent of the remediation we are keeping track on machines. 
var lastRemediation *RemediationData - if v, ok := kcp.Annotations[controlplanev1.RemediationInProgressAnnotation]; ok { + if v, ok := controlPlane.KCP.Annotations[controlplanev1.RemediationInProgressAnnotation]; ok { remediationData, err := RemediationDataFromAnnotation(v) if err != nil { return err } lastRemediation = remediationData } else { - for _, m := range ownedMachines.UnsortedList() { + for _, m := range controlPlane.Machines.UnsortedList() { if v, ok := m.Annotations[controlplanev1.RemediationForAnnotation]; ok { remediationData, err := RemediationDataFromAnnotation(v) if err != nil { @@ -142,7 +132,7 @@ func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, kcp *c } if lastRemediation != nil { - kcp.Status.LastRemediation = lastRemediation.ToStatus() + controlPlane.KCP.Status.LastRemediation = lastRemediation.ToStatus() } return nil } diff --git a/controlplane/kubeadm/internal/controllers/status_test.go b/controlplane/kubeadm/internal/controllers/status_test.go index cf2e1c2a7e8a..833d0f717e42 100644 --- a/controlplane/kubeadm/internal/controllers/status_test.go +++ b/controlplane/kubeadm/internal/controllers/status_test.go @@ -24,14 +24,13 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" - "k8s.io/klog/v2/klogr" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" + controlplanev1webhooks "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/webhooks" "sigs.k8s.io/cluster-api/util/conditions" ) @@ -65,11 +64,12 @@ func TestKubeadmControlPlaneReconciler_updateStatusNoMachines(t *testing.T) { }, }, } - kcp.Default() - g.Expect(kcp.ValidateCreate()).To(Succeed()) + webhook := &controlplanev1webhooks.KubeadmControlPlane{} + g.Expect(webhook.Default(ctx, kcp)).To(Succeed()) + _, err := webhook.ValidateCreate(ctx, kcp) + g.Expect(err).ToNot(HaveOccurred()) fakeClient := newFakeClient(kcp.DeepCopy(), cluster.DeepCopy()) - log.SetLogger(klogr.New()) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, @@ -80,7 +80,13 @@ func TestKubeadmControlPlaneReconciler_updateStatusNoMachines(t *testing.T) { recorder: record.NewFakeRecorder(32), } - g.Expect(r.updateStatus(ctx, kcp, cluster)).To(Succeed()) + controlPlane := &internal.ControlPlane{ + KCP: kcp, + Cluster: cluster, + } + controlPlane.InjectTestManagementCluster(r.managementCluster) + + g.Expect(r.updateStatus(ctx, controlPlane)).To(Succeed()) g.Expect(kcp.Status.Replicas).To(BeEquivalentTo(0)) g.Expect(kcp.Status.ReadyReplicas).To(BeEquivalentTo(0)) g.Expect(kcp.Status.UnavailableReplicas).To(BeEquivalentTo(0)) @@ -121,8 +127,10 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesNotReady(t *testin }, }, } - kcp.Default() - g.Expect(kcp.ValidateCreate()).To(Succeed()) + webhook := &controlplanev1webhooks.KubeadmControlPlane{} + g.Expect(webhook.Default(ctx, kcp)).To(Succeed()) + _, err := webhook.ValidateCreate(ctx, kcp) + g.Expect(err).ToNot(HaveOccurred()) machines := map[string]*clusterv1.Machine{} objs := []client.Object{cluster.DeepCopy(), kcp.DeepCopy()} @@ -134,7 +142,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesNotReady(t *testin } fakeClient := newFakeClient(objs...) 
- log.SetLogger(klogr.New()) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, @@ -145,7 +152,14 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesNotReady(t *testin recorder: record.NewFakeRecorder(32), } - g.Expect(r.updateStatus(ctx, kcp, cluster)).To(Succeed()) + controlPlane := &internal.ControlPlane{ + KCP: kcp, + Cluster: cluster, + Machines: machines, + } + controlPlane.InjectTestManagementCluster(r.managementCluster) + + g.Expect(r.updateStatus(ctx, controlPlane)).To(Succeed()) g.Expect(kcp.Status.Replicas).To(BeEquivalentTo(3)) g.Expect(kcp.Status.ReadyReplicas).To(BeEquivalentTo(0)) g.Expect(kcp.Status.UnavailableReplicas).To(BeEquivalentTo(3)) @@ -186,8 +200,10 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesReady(t *testing.T }, }, } - kcp.Default() - g.Expect(kcp.ValidateCreate()).To(Succeed()) + webhook := &controlplanev1webhooks.KubeadmControlPlane{} + g.Expect(webhook.Default(ctx, kcp)).To(Succeed()) + _, err := webhook.ValidateCreate(ctx, kcp) + g.Expect(err).ToNot(HaveOccurred()) objs := []client.Object{cluster.DeepCopy(), kcp.DeepCopy(), kubeadmConfigMap()} machines := map[string]*clusterv1.Machine{} @@ -199,7 +215,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesReady(t *testing.T } fakeClient := newFakeClient(objs...) - log.SetLogger(klogr.New()) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, @@ -216,7 +231,14 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesReady(t *testing.T recorder: record.NewFakeRecorder(32), } - g.Expect(r.updateStatus(ctx, kcp, cluster)).To(Succeed()) + controlPlane := &internal.ControlPlane{ + KCP: kcp, + Cluster: cluster, + Machines: machines, + } + controlPlane.InjectTestManagementCluster(r.managementCluster) + + g.Expect(r.updateStatus(ctx, controlPlane)).To(Succeed()) g.Expect(kcp.Status.Replicas).To(BeEquivalentTo(3)) g.Expect(kcp.Status.ReadyReplicas).To(BeEquivalentTo(3)) g.Expect(kcp.Status.UnavailableReplicas).To(BeEquivalentTo(0)) @@ -259,8 +281,10 @@ func TestKubeadmControlPlaneReconciler_updateStatusMachinesReadyMixed(t *testing }, }, } - kcp.Default() - g.Expect(kcp.ValidateCreate()).To(Succeed()) + webhook := &controlplanev1webhooks.KubeadmControlPlane{} + g.Expect(webhook.Default(ctx, kcp)).To(Succeed()) + _, err := webhook.ValidateCreate(ctx, kcp) + g.Expect(err).ToNot(HaveOccurred()) machines := map[string]*clusterv1.Machine{} objs := []client.Object{cluster.DeepCopy(), kcp.DeepCopy()} for i := 0; i < 4; i++ { @@ -273,7 +297,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusMachinesReadyMixed(t *testing objs = append(objs, n, m, kubeadmConfigMap()) machines[m.Name] = m fakeClient := newFakeClient(objs...) 
- log.SetLogger(klogr.New()) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, @@ -290,7 +313,14 @@ func TestKubeadmControlPlaneReconciler_updateStatusMachinesReadyMixed(t *testing recorder: record.NewFakeRecorder(32), } - g.Expect(r.updateStatus(ctx, kcp, cluster)).To(Succeed()) + controlPlane := &internal.ControlPlane{ + KCP: kcp, + Cluster: cluster, + Machines: machines, + } + controlPlane.InjectTestManagementCluster(r.managementCluster) + + g.Expect(r.updateStatus(ctx, controlPlane)).To(Succeed()) g.Expect(kcp.Status.Replicas).To(BeEquivalentTo(5)) g.Expect(kcp.Status.ReadyReplicas).To(BeEquivalentTo(1)) g.Expect(kcp.Status.UnavailableReplicas).To(BeEquivalentTo(4)) @@ -322,7 +352,7 @@ func TestKubeadmControlPlaneReconciler_machinesCreatedIsIsTrueEvenWhenTheNodesAr }, Spec: controlplanev1.KubeadmControlPlaneSpec{ Version: "v1.16.6", - Replicas: pointer.Int32(3), + Replicas: ptr.To[int32](3), MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ InfrastructureRef: corev1.ObjectReference{ APIVersion: "test/v1alpha1", @@ -332,8 +362,10 @@ func TestKubeadmControlPlaneReconciler_machinesCreatedIsIsTrueEvenWhenTheNodesAr }, }, } - kcp.Default() - g.Expect(kcp.ValidateCreate()).To(Succeed()) + webhook := &controlplanev1webhooks.KubeadmControlPlane{} + g.Expect(webhook.Default(ctx, kcp)).To(Succeed()) + _, err := webhook.ValidateCreate(ctx, kcp) + g.Expect(err).ToNot(HaveOccurred()) machines := map[string]*clusterv1.Machine{} objs := []client.Object{cluster.DeepCopy(), kcp.DeepCopy()} // Create the desired number of machines @@ -345,7 +377,6 @@ func TestKubeadmControlPlaneReconciler_machinesCreatedIsIsTrueEvenWhenTheNodesAr } fakeClient := newFakeClient(objs...) - log.SetLogger(klogr.New()) // Set all the machines to `not ready` r := &KubeadmControlPlaneReconciler{ @@ -363,7 +394,14 @@ func TestKubeadmControlPlaneReconciler_machinesCreatedIsIsTrueEvenWhenTheNodesAr recorder: record.NewFakeRecorder(32), } - g.Expect(r.updateStatus(ctx, kcp, cluster)).To(Succeed()) + controlPlane := &internal.ControlPlane{ + KCP: kcp, + Cluster: cluster, + Machines: machines, + } + controlPlane.InjectTestManagementCluster(r.managementCluster) + + g.Expect(r.updateStatus(ctx, controlPlane)).To(Succeed()) g.Expect(kcp.Status.Replicas).To(BeEquivalentTo(3)) g.Expect(kcp.Status.ReadyReplicas).To(BeEquivalentTo(0)) g.Expect(kcp.Status.UnavailableReplicas).To(BeEquivalentTo(3)) diff --git a/controlplane/kubeadm/internal/controllers/suite_test.go b/controlplane/kubeadm/internal/controllers/suite_test.go index 865f39ef5fcc..a0fc57112258 100644 --- a/controlplane/kubeadm/internal/controllers/suite_test.go +++ b/controlplane/kubeadm/internal/controllers/suite_test.go @@ -17,22 +17,44 @@ limitations under the License. 
package controllers import ( + "context" + "fmt" "os" "testing" + corev1 "k8s.io/api/core/v1" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/cluster-api/internal/test/envtest" ) var ( - env *envtest.Environment - ctx = ctrl.SetupSignalHandler() + env *envtest.Environment + ctx = ctrl.SetupSignalHandler() + secretCachingClient client.Client ) func TestMain(m *testing.M) { + setupReconcilers := func(_ context.Context, mgr ctrl.Manager) { + var err error + secretCachingClient, err = client.New(mgr.GetConfig(), client.Options{ + HTTPClient: mgr.GetHTTPClient(), + Cache: &client.CacheOptions{ + Reader: mgr.GetCache(), + }, + }) + if err != nil { + panic(fmt.Sprintf("unable to create secretCachingClient: %v", err)) + } + } os.Exit(envtest.Run(ctx, envtest.RunInput{ - M: m, - SetupEnv: func(e *envtest.Environment) { env = e }, + M: m, + ManagerUncachedObjs: []client.Object{ + &corev1.ConfigMap{}, + &corev1.Secret{}, + }, + SetupEnv: func(e *envtest.Environment) { env = e }, + SetupReconcilers: setupReconcilers, })) } diff --git a/controlplane/kubeadm/internal/controllers/upgrade.go b/controlplane/kubeadm/internal/controllers/upgrade.go index a3c0a6f8ee56..ae9823d6154d 100644 --- a/controlplane/kubeadm/internal/controllers/upgrade.go +++ b/controlplane/kubeadm/internal/controllers/upgrade.go @@ -19,42 +19,40 @@ package controllers import ( "context" - "github.com/blang/semver" + "github.com/blang/semver/v4" "github.com/pkg/errors" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" - "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/collections" "sigs.k8s.io/cluster-api/util/version" ) func (r *KubeadmControlPlaneReconciler) upgradeControlPlane( ctx context.Context, - cluster *clusterv1.Cluster, - kcp *controlplanev1.KubeadmControlPlane, controlPlane *internal.ControlPlane, machinesRequireUpgrade collections.Machines, ) (ctrl.Result, error) { logger := ctrl.LoggerFrom(ctx) - if kcp.Spec.RolloutStrategy == nil || kcp.Spec.RolloutStrategy.RollingUpdate == nil { + if controlPlane.KCP.Spec.RolloutStrategy == nil || controlPlane.KCP.Spec.RolloutStrategy.RollingUpdate == nil { return ctrl.Result{}, errors.New("rolloutStrategy is not set") } // TODO: handle reconciliation of etcd members and kubeadm config in case they get out of sync with cluster - workloadCluster, err := r.managementCluster.GetWorkloadCluster(ctx, util.ObjectKey(cluster)) + workloadCluster, err := controlPlane.GetWorkloadCluster(ctx) if err != nil { - logger.Error(err, "failed to get remote client for workload cluster", "cluster key", util.ObjectKey(cluster)) + logger.Error(err, "failed to get remote client for workload cluster", "Cluster", klog.KObj(controlPlane.Cluster)) return ctrl.Result{}, err } - parsedVersion, err := semver.ParseTolerant(kcp.Spec.Version) + parsedVersion, err := semver.ParseTolerant(controlPlane.KCP.Spec.Version) if err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to parse kubernetes version %q", kcp.Spec.Version) + return ctrl.Result{}, errors.Wrapf(err, "failed to parse kubernetes version %q", controlPlane.KCP.Spec.Version) } if err := workloadCluster.ReconcileKubeletRBACRole(ctx, parsedVersion); err != nil { @@ -71,65 +69,61 @@ func (r *KubeadmControlPlaneReconciler) 
upgradeControlPlane( return ctrl.Result{}, errors.Wrap(err, "failed to set role and role binding for kubeadm") } - if err := workloadCluster.UpdateKubernetesVersionInKubeadmConfigMap(ctx, parsedVersion); err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to update the kubernetes version in the kubeadm config map") + // Ensure kubeadm clusterRoleBinding for v1.29+ as per https://github.com/kubernetes/kubernetes/pull/121305 + if err := workloadCluster.AllowClusterAdminPermissions(ctx, parsedVersion); err != nil { + return ctrl.Result{}, errors.Wrap(err, "failed to set cluster-admin ClusterRoleBinding for kubeadm") } - if kcp.Spec.KubeadmConfigSpec.ClusterConfiguration != nil { + kubeadmCMMutators := make([]func(*bootstrapv1.ClusterConfiguration), 0) + kubeadmCMMutators = append(kubeadmCMMutators, workloadCluster.UpdateKubernetesVersionInKubeadmConfigMap(parsedVersion)) + + if controlPlane.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration != nil { // We intentionally only parse major/minor/patch so that the subsequent code // also already applies to beta versions of new releases. - parsedVersionTolerant, err := version.ParseMajorMinorPatchTolerant(kcp.Spec.Version) + parsedVersionTolerant, err := version.ParseMajorMinorPatchTolerant(controlPlane.KCP.Spec.Version) if err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to parse kubernetes version %q", kcp.Spec.Version) - } - // Get the imageRepository or the correct value if nothing is set and a migration is necessary. - imageRepository := internal.ImageRepositoryFromClusterConfig(kcp.Spec.KubeadmConfigSpec.ClusterConfiguration, parsedVersionTolerant) - - if err := workloadCluster.UpdateImageRepositoryInKubeadmConfigMap(ctx, imageRepository, parsedVersion); err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to update the image repository in the kubeadm config map") - } - } - - if kcp.Spec.KubeadmConfigSpec.ClusterConfiguration != nil && kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local != nil { - meta := kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local.ImageMeta - if err := workloadCluster.UpdateEtcdVersionInKubeadmConfigMap(ctx, meta.ImageRepository, meta.ImageTag, parsedVersion); err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to update the etcd version in the kubeadm config map") + return ctrl.Result{}, errors.Wrapf(err, "failed to parse kubernetes version %q", controlPlane.KCP.Spec.Version) } - extraArgs := kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local.ExtraArgs - if err := workloadCluster.UpdateEtcdExtraArgsInKubeadmConfigMap(ctx, extraArgs, parsedVersion); err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to update the etcd extra args in the kubeadm config map") + // Get the imageRepository or the correct value if nothing is set and a migration is necessary. 
+ imageRepository := internal.ImageRepositoryFromClusterConfig(controlPlane.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration, parsedVersionTolerant) + + kubeadmCMMutators = append(kubeadmCMMutators, + workloadCluster.UpdateImageRepositoryInKubeadmConfigMap(imageRepository), + workloadCluster.UpdateFeatureGatesInKubeadmConfigMap(controlPlane.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration.FeatureGates), + workloadCluster.UpdateAPIServerInKubeadmConfigMap(controlPlane.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration.APIServer), + workloadCluster.UpdateControllerManagerInKubeadmConfigMap(controlPlane.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager), + workloadCluster.UpdateSchedulerInKubeadmConfigMap(controlPlane.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration.Scheduler)) + + // Etcd local and external are mutually exclusive and they cannot be switched, once set. + if controlPlane.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local != nil { + kubeadmCMMutators = append(kubeadmCMMutators, + workloadCluster.UpdateEtcdLocalInKubeadmConfigMap(controlPlane.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local)) + } else { + kubeadmCMMutators = append(kubeadmCMMutators, + workloadCluster.UpdateEtcdExternalInKubeadmConfigMap(controlPlane.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.External)) } } - if kcp.Spec.KubeadmConfigSpec.ClusterConfiguration != nil { - if err := workloadCluster.UpdateAPIServerInKubeadmConfigMap(ctx, kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.APIServer, parsedVersion); err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to update api server in the kubeadm config map") - } - - if err := workloadCluster.UpdateControllerManagerInKubeadmConfigMap(ctx, kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager, parsedVersion); err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to update controller manager in the kubeadm config map") - } - - if err := workloadCluster.UpdateSchedulerInKubeadmConfigMap(ctx, kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.Scheduler, parsedVersion); err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to update scheduler in the kubeadm config map") - } + // collectively update Kubeadm config map + if err = workloadCluster.UpdateClusterConfiguration(ctx, parsedVersion, kubeadmCMMutators...); err != nil { + return ctrl.Result{}, err } if err := workloadCluster.UpdateKubeletConfigMap(ctx, parsedVersion); err != nil { return ctrl.Result{}, errors.Wrap(err, "failed to upgrade kubelet config map") } - switch kcp.Spec.RolloutStrategy.Type { + switch controlPlane.KCP.Spec.RolloutStrategy.Type { case controlplanev1.RollingUpdateStrategyType: // RolloutStrategy is currently defaulted and validated to be RollingUpdate // We can ignore MaxUnavailable because we are enforcing health checks before we get here. 
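(Editorial aside, not part of the patch: the `kubeadmCMMutators` refactor above replaces one ConfigMap write per field with a single read-modify-write. A minimal, self-contained sketch of that pattern with stand-in types; the real mutators have type `func(*bootstrapv1.ClusterConfiguration)` and are applied by `UpdateClusterConfiguration`.)

```go
package main

import "fmt"

// ClusterConfig stands in for bootstrapv1.ClusterConfiguration.
type ClusterConfig struct {
	KubernetesVersion string
	ImageRepository   string
}

type mutator func(*ClusterConfig)

// applyMutators gathers every pending change as a closure, applies them all
// in memory, and leaves a single point where the result would be persisted
// back to the kubeadm-config ConfigMap exactly once.
func applyMutators(cfg *ClusterConfig, mutators ...mutator) {
	for _, m := range mutators {
		m(cfg)
	}
	// In the real code the mutated configuration is written back here.
}

func main() {
	cfg := &ClusterConfig{}
	applyMutators(cfg,
		func(c *ClusterConfig) { c.KubernetesVersion = "v1.28.0" },
		func(c *ClusterConfig) { c.ImageRepository = "registry.k8s.io" },
	)
	fmt.Printf("%+v\n", cfg) // both changes land in one pass
}
```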
- maxNodes := *kcp.Spec.Replicas + int32(kcp.Spec.RolloutStrategy.RollingUpdate.MaxSurge.IntValue()) + maxNodes := *controlPlane.KCP.Spec.Replicas + int32(controlPlane.KCP.Spec.RolloutStrategy.RollingUpdate.MaxSurge.IntValue()) if int32(controlPlane.Machines.Len()) < maxNodes { // scaleUp ensures that we don't continue scaling up while waiting for Machines to have NodeRefs - return r.scaleUpControlPlane(ctx, cluster, kcp, controlPlane) + return r.scaleUpControlPlane(ctx, controlPlane) } - return r.scaleDownControlPlane(ctx, cluster, kcp, controlPlane, machinesRequireUpgrade) + return r.scaleDownControlPlane(ctx, controlPlane, machinesRequireUpgrade) default: logger.Info("RolloutStrategy type is not set to RollingUpdateStrategyType, unable to determine the strategy for rolling out machines") return ctrl.Result{}, nil diff --git a/controlplane/kubeadm/internal/controllers/upgrade_test.go b/controlplane/kubeadm/internal/controllers/upgrade_test.go index 75f26fe3d860..be5939f30963 100644 --- a/controlplane/kubeadm/internal/controllers/upgrade_test.go +++ b/controlplane/kubeadm/internal/controllers/upgrade_test.go @@ -27,7 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -35,6 +35,7 @@ import ( bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" "sigs.k8s.io/cluster-api/internal/test/builder" + "sigs.k8s.io/cluster-api/internal/util/ssa" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/collections" ) @@ -48,7 +49,7 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleUp(t *testing.T) { t.Log("Creating the namespace") ns, err := env.CreateNamespace(ctx, "test-kcp-reconciler-rollout-scaleup") - g.Expect(err).To(BeNil()) + g.Expect(err).ToNot(HaveOccurred()) return ns } @@ -74,13 +75,13 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleUp(t *testing.T) { cluster.Status.InfrastructureReady = true kcp.UID = types.UID(util.RandomString(10)) kcp.Spec.KubeadmConfigSpec.ClusterConfiguration = nil - kcp.Spec.Replicas = pointer.Int32(1) + kcp.Spec.Replicas = ptr.To[int32](1) setKCPHealthy(kcp) r := &KubeadmControlPlaneReconciler{ - Client: env, - APIReader: env.GetAPIReader(), - recorder: record.NewFakeRecorder(32), + Client: env, + SecretCachingClient: secretCachingClient, + recorder: record.NewFakeRecorder(32), managementCluster: &fakeManagementCluster{ Management: &internal.Management{Client: env}, Workload: fakeWorkloadCluster{ @@ -93,17 +94,18 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleUp(t *testing.T) { Status: internal.ClusterStatus{Nodes: 1}, }, }, - disableInPlacePropagation: true, + ssaCache: ssa.NewCache(), } controlPlane := &internal.ControlPlane{ KCP: kcp, Cluster: cluster, Machines: nil, } + controlPlane.InjectTestManagementCluster(r.managementCluster) - result, err := r.initializeControlPlane(ctx, cluster, kcp, controlPlane) - g.Expect(result).To(Equal(ctrl.Result{Requeue: true})) - g.Expect(err).NotTo(HaveOccurred()) + result, err := r.initializeControlPlane(ctx, controlPlane) + g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true})) + g.Expect(err).ToNot(HaveOccurred()) // initial setup initialMachine := &clusterv1.MachineList{} @@ -123,9 +125,9 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleUp(t *testing.T) { // run upgrade the first 
time, expect we scale up needingUpgrade := collections.FromMachineList(initialMachine) controlPlane.Machines = needingUpgrade - result, err = r.upgradeControlPlane(ctx, cluster, kcp, controlPlane, needingUpgrade) - g.Expect(result).To(Equal(ctrl.Result{Requeue: true})) - g.Expect(err).To(BeNil()) + result, err = r.upgradeControlPlane(ctx, controlPlane, needingUpgrade) + g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true})) + g.Expect(err).ToNot(HaveOccurred()) bothMachines := &clusterv1.MachineList{} g.Eventually(func(g Gomega) { g.Expect(env.List(ctx, bothMachines, client.InNamespace(cluster.Namespace))).To(Succeed()) @@ -135,9 +137,16 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleUp(t *testing.T) { // run upgrade a second time, simulate that the node has not appeared yet but the machine exists // Unhealthy control plane will be detected during reconcile loop and upgrade will never be called. - result, err = r.reconcile(context.Background(), cluster, kcp) + controlPlane = &internal.ControlPlane{ + KCP: kcp, + Cluster: cluster, + Machines: collections.FromMachineList(bothMachines), + } + controlPlane.InjectTestManagementCluster(r.managementCluster) + + result, err = r.reconcile(context.Background(), controlPlane) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(result).To(Equal(ctrl.Result{RequeueAfter: preflightFailedRequeueAfter})) + g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: preflightFailedRequeueAfter})) g.Eventually(func(g Gomega) { g.Expect(env.List(context.Background(), bothMachines, client.InNamespace(cluster.Namespace))).To(Succeed()) g.Expect(bothMachines.Items).To(HaveLen(2)) @@ -158,9 +167,9 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleUp(t *testing.T) { } // run upgrade the second time, expect we scale down - result, err = r.upgradeControlPlane(ctx, cluster, kcp, controlPlane, machinesRequireUpgrade) - g.Expect(err).To(BeNil()) - g.Expect(result).To(Equal(ctrl.Result{Requeue: true})) + result, err = r.upgradeControlPlane(ctx, controlPlane, machinesRequireUpgrade) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true})) finalMachine := &clusterv1.MachineList{} g.Eventually(func(g Gomega) { g.Expect(env.List(ctx, finalMachine, client.InNamespace(cluster.Namespace))).To(Succeed()) @@ -177,7 +186,7 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleDown(t *testing.T) { cluster, kcp, tmpl := createClusterWithControlPlane(metav1.NamespaceDefault) cluster.Spec.ControlPlaneEndpoint.Host = "nodomain.example.com1" cluster.Spec.ControlPlaneEndpoint.Port = 6443 - kcp.Spec.Replicas = pointer.Int32(3) + kcp.Spec.Replicas = ptr.To[int32](3) kcp.Spec.RolloutStrategy.RollingUpdate.MaxSurge.IntVal = 0 setKCPHealthy(kcp) @@ -219,8 +228,8 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleDown(t *testing.T) { fakeClient := newFakeClient(objs...) 
fmc.Reader = fakeClient r := &KubeadmControlPlaneReconciler{ - APIReader: fakeClient, Client: fakeClient, + SecretCachingClient: fakeClient, managementCluster: fmc, managementClusterUncached: fmc, } @@ -230,10 +239,11 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleDown(t *testing.T) { Cluster: cluster, Machines: nil, } + controlPlane.InjectTestManagementCluster(r.managementCluster) - result, err := r.reconcile(ctx, cluster, kcp) - g.Expect(result).To(Equal(ctrl.Result{})) - g.Expect(err).NotTo(HaveOccurred()) + result, err := r.reconcile(ctx, controlPlane) + g.Expect(result).To(BeComparableTo(ctrl.Result{})) + g.Expect(err).ToNot(HaveOccurred()) machineList := &clusterv1.MachineList{} g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed()) @@ -248,9 +258,10 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleDown(t *testing.T) { // run upgrade, expect we scale down needingUpgrade := collections.FromMachineList(machineList) controlPlane.Machines = needingUpgrade - result, err = r.upgradeControlPlane(ctx, cluster, kcp, controlPlane, needingUpgrade) - g.Expect(result).To(Equal(ctrl.Result{Requeue: true})) - g.Expect(err).To(BeNil()) + + result, err = r.upgradeControlPlane(ctx, controlPlane, needingUpgrade) + g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true})) + g.Expect(err).ToNot(HaveOccurred()) remainingMachines := &clusterv1.MachineList{} g.Expect(fakeClient.List(ctx, remainingMachines, client.InNamespace(cluster.Namespace))).To(Succeed()) g.Expect(remainingMachines.Items).To(HaveLen(2)) diff --git a/controlplane/kubeadm/internal/etcd/etcd.go b/controlplane/kubeadm/internal/etcd/etcd.go index abcdd3bc85fe..a3f459ff1801 100644 --- a/controlplane/kubeadm/internal/etcd/etcd.go +++ b/controlplane/kubeadm/internal/etcd/etcd.go @@ -131,7 +131,7 @@ func pbMemberToMember(m *etcdserverpb.Member) *Member { // ClientConfiguration describes the configuration for an etcd client. type ClientConfiguration struct { - Endpoints []string + Endpoint string Proxy proxy.Proxy TLSConfig *tls.Config DialTimeout time.Duration @@ -146,7 +146,7 @@ func NewClient(ctx context.Context, config ClientConfiguration) (*Client, error) } etcdClient, err := clientv3.New(clientv3.Config{ - Endpoints: config.Endpoints, + Endpoints: []string{config.Endpoint}, // NOTE: endpoint is used only as a host for certificate validation, the network connection is defined by DialOptions. 
DialTimeout: config.DialTimeout, DialOptions: []grpc.DialOption{ grpc.WithBlock(), // block until the underlying connection is up diff --git a/controlplane/kubeadm/internal/etcd/etcd_test.go b/controlplane/kubeadm/internal/etcd/etcd_test.go index d5699d64f82f..3b8b65498565 100644 --- a/controlplane/kubeadm/internal/etcd/etcd_test.go +++ b/controlplane/kubeadm/internal/etcd/etcd_test.go @@ -50,7 +50,7 @@ func TestEtcdMembers_WithErrors(t *testing.T) { } client, err := newEtcdClient(ctx, fakeEtcdClient, DefaultCallTimeout) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) members, err := client.Members(ctx) g.Expect(err).To(HaveOccurred()) @@ -87,20 +87,20 @@ func TestEtcdMembers_WithSuccess(t *testing.T) { } client, err := newEtcdClient(ctx, fakeEtcdClient, DefaultCallTimeout) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) members, err := client.Members(ctx) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(members).To(HaveLen(1)) err = client.MoveLeader(ctx, 1) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) err = client.RemoveMember(ctx, 1234) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) updatedMembers, err := client.UpdateMemberPeerURLs(ctx, 1234, []string{"https://4.5.6.7:2000"}) - g.Expect(err).NotTo(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(updatedMembers[0].PeerURLs).To(HaveLen(2)) g.Expect(updatedMembers[0].PeerURLs).To(Equal([]string{"https://1.2.3.4:2000", "https://4.5.6.7:2000"})) } diff --git a/controlplane/kubeadm/internal/etcd_client_generator.go b/controlplane/kubeadm/internal/etcd_client_generator.go index 7786d4cf6ba7..f67fc04281d3 100644 --- a/controlplane/kubeadm/internal/etcd_client_generator.go +++ b/controlplane/kubeadm/internal/etcd_client_generator.go @@ -38,7 +38,7 @@ type EtcdClientGenerator struct { createClient clientCreator } -type clientCreator func(ctx context.Context, endpoints []string) (*etcd.Client, error) +type clientCreator func(ctx context.Context, endpoint string) (*etcd.Client, error) var errEtcdNodeConnection = errors.New("failed to connect to etcd node") @@ -46,7 +46,7 @@ var errEtcdNodeConnection = errors.New("failed to connect to etcd node") func NewEtcdClientGenerator(restConfig *rest.Config, tlsConfig *tls.Config, etcdDialTimeout, etcdCallTimeout time.Duration) *EtcdClientGenerator { ecg := &EtcdClientGenerator{restConfig: restConfig, tlsConfig: tlsConfig} - ecg.createClient = func(ctx context.Context, endpoints []string) (*etcd.Client, error) { + ecg.createClient = func(ctx context.Context, endpoint string) (*etcd.Client, error) { p := proxy.Proxy{ Kind: "pods", Namespace: metav1.NamespaceSystem, @@ -54,7 +54,7 @@ func NewEtcdClientGenerator(restConfig *rest.Config, tlsConfig *tls.Config, etcd Port: 2379, } return etcd.NewClient(ctx, etcd.ClientConfiguration{ - Endpoints: endpoints, + Endpoint: endpoint, Proxy: p, TLSConfig: tlsConfig, DialTimeout: etcdDialTimeout, @@ -75,8 +75,8 @@ func (c *EtcdClientGenerator) forFirstAvailableNode(ctx context.Context, nodeNam // Loop through the existing control plane nodes. 
var errs []error for _, name := range nodeNames { - endpoints := []string{staticPodName("etcd", name)} - client, err := c.createClient(ctx, endpoints) + endpoint := staticPodName("etcd", name) + client, err := c.createClient(ctx, endpoint) if err != nil { errs = append(errs, err) continue diff --git a/controlplane/kubeadm/internal/etcd_client_generator_test.go b/controlplane/kubeadm/internal/etcd_client_generator_test.go index eff3879ec2d6..ed9703b4af9e 100644 --- a/controlplane/kubeadm/internal/etcd_client_generator_test.go +++ b/controlplane/kubeadm/internal/etcd_client_generator_test.go @@ -54,8 +54,8 @@ func TestFirstAvailableNode(t *testing.T) { { name: "Returns client successfully", nodes: []string{"node-1"}, - cc: func(ctx context.Context, endpoints []string) (*etcd.Client, error) { - return &etcd.Client{Endpoint: endpoints[0]}, nil + cc: func(_ context.Context, endpoint string) (*etcd.Client, error) { + return &etcd.Client{Endpoint: endpoint}, nil }, expectedClient: etcd.Client{Endpoint: "etcd-node-1"}, }, @@ -68,7 +68,7 @@ func TestFirstAvailableNode(t *testing.T) { { name: "Returns error from client", nodes: []string{"node-1", "node-2"}, - cc: func(ctx context.Context, endpoints []string) (*etcd.Client, error) { + cc: func(context.Context, string) (*etcd.Client, error) { return nil, errors.New("something went wrong") }, expectedErr: "could not establish a connection to any etcd node: something went wrong", @@ -76,12 +76,12 @@ func TestFirstAvailableNode(t *testing.T) { { name: "Returns client when some of the nodes are down but at least one node is up", nodes: []string{"node-down-1", "node-down-2", "node-up"}, - cc: func(ctx context.Context, endpoints []string) (*etcd.Client, error) { - if strings.Contains(endpoints[0], "node-down") { + cc: func(_ context.Context, endpoint string) (*etcd.Client, error) { + if strings.Contains(endpoint, "node-down") { return nil, errors.New("node down") } - return &etcd.Client{Endpoint: endpoints[0]}, nil + return &etcd.Client{Endpoint: endpoint}, nil }, expectedClient: etcd.Client{Endpoint: "etcd-node-up"}, }, @@ -99,7 +99,7 @@ func TestFirstAvailableNode(t *testing.T) { g.Expect(err).To(HaveOccurred()) g.Expect(err.Error()).Should(Equal(tt.expectedErr)) } else { - g.Expect(*client).Should(Equal(tt.expectedClient)) + g.Expect(*client).Should(BeComparableTo(tt.expectedClient)) } }) } @@ -117,9 +117,9 @@ func TestForLeader(t *testing.T) { { name: "Returns client for leader successfully", nodes: []string{"node-1", "node-leader"}, - cc: func(ctx context.Context, endpoints []string) (*etcd.Client, error) { + cc: func(_ context.Context, endpoint string) (*etcd.Client, error) { return &etcd.Client{ - Endpoint: endpoints[0], + Endpoint: endpoint, LeaderID: 1729, EtcdClient: &etcdfake.FakeEtcdClient{ MemberListResponse: &clientv3.MemberListResponse{ @@ -146,12 +146,12 @@ func TestForLeader(t *testing.T) { { name: "Returns client for leader even when one or more nodes are down", nodes: []string{"node-down-1", "node-down-2", "node-leader"}, - cc: func(ctx context.Context, endpoints []string) (*etcd.Client, error) { - if strings.Contains(endpoints[0], "node-down") { + cc: func(_ context.Context, endpoint string) (*etcd.Client, error) { + if strings.Contains(endpoint, "node-down") { return nil, errors.New("node down") } return &etcd.Client{ - Endpoint: endpoints[0], + Endpoint: endpoint, LeaderID: 1729, EtcdClient: &etcdfake.FakeEtcdClient{ MemberListResponse: &clientv3.MemberListResponse{ @@ -182,9 +182,9 @@ func TestForLeader(t *testing.T) { { name: 
"Returns error when the leader does not have a corresponding node", nodes: []string{"node-1"}, - cc: func(ctx context.Context, endpoints []string) (*etcd.Client, error) { + cc: func(_ context.Context, endpoint string) (*etcd.Client, error) { return &etcd.Client{ - Endpoint: endpoints[0], + Endpoint: endpoint, LeaderID: 1729, EtcdClient: &etcdfake.FakeEtcdClient{ MemberListResponse: &clientv3.MemberListResponse{ @@ -201,7 +201,7 @@ func TestForLeader(t *testing.T) { { name: "Returns error when all nodes are down", nodes: []string{"node-down-1", "node-down-2", "node-down-3"}, - cc: func(ctx context.Context, endpoints []string) (*etcd.Client, error) { + cc: func(context.Context, string) (*etcd.Client, error) { return nil, errors.New("node down") }, expectedErr: "could not establish a connection to the etcd leader: [could not establish a connection to any etcd node: node down, failed to connect to etcd node]", @@ -221,7 +221,7 @@ func TestForLeader(t *testing.T) { g.Expect(err).To(HaveOccurred()) g.Expect(err.Error()).Should(Equal(tt.expectedErr)) } else { - g.Expect(*client).Should(Equal(tt.expectedClient)) + g.Expect(*client).Should(BeComparableTo(tt.expectedClient)) } }) } diff --git a/controlplane/kubeadm/internal/filters.go b/controlplane/kubeadm/internal/filters.go index 5dd8a9e016c1..acb54d51e545 100644 --- a/controlplane/kubeadm/internal/filters.go +++ b/controlplane/kubeadm/internal/filters.go @@ -18,112 +18,168 @@ package internal import ( "encoding/json" + "fmt" "reflect" + "strings" + "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + "sigs.k8s.io/cluster-api/internal/util/compare" "sigs.k8s.io/cluster-api/util/collections" ) -// MatchesMachineSpec returns a filter to find all machines that matches with KCP config and do not require any rollout. +// matchesMachineSpec checks if a Machine matches any of a set of KubeadmConfigs and a set of infra machine configs. +// If it doesn't, it returns the reasons why. // Kubernetes version, infrastructure template, and KubeadmConfig field need to be equivalent. // Note: We don't need to compare the entire MachineSpec to determine if a Machine needs to be rolled out, // because all the fields in the MachineSpec, except for version, the infrastructureRef and bootstrap.ConfigRef, are either: // - mutated in-place (ex: NodeDrainTimeout) // - are not dictated by KCP (ex: ProviderID) // - are not relevant for the rollout decision (ex: failureDomain). 
-func MatchesMachineSpec(infraConfigs map[string]*unstructured.Unstructured, machineConfigs map[string]*bootstrapv1.KubeadmConfig, kcp *controlplanev1.KubeadmControlPlane) func(machine *clusterv1.Machine) bool { - return collections.And( - collections.MatchesKubernetesVersion(kcp.Spec.Version), - MatchesKubeadmBootstrapConfig(machineConfigs, kcp), - MatchesTemplateClonedFrom(infraConfigs, kcp), - ) +func matchesMachineSpec(infraConfigs map[string]*unstructured.Unstructured, machineConfigs map[string]*bootstrapv1.KubeadmConfig, kcp *controlplanev1.KubeadmControlPlane, machine *clusterv1.Machine) (string, bool, error) { + mismatchReasons := []string{} + + if !collections.MatchesKubernetesVersion(kcp.Spec.Version)(machine) { + machineVersion := "" + if machine != nil && machine.Spec.Version != nil { + machineVersion = *machine.Spec.Version + } + mismatchReasons = append(mismatchReasons, fmt.Sprintf("Machine version %q is not equal to KCP version %q", machineVersion, kcp.Spec.Version)) + } + + reason, matches, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, machine) + if err != nil { + return "", false, errors.Wrapf(err, "failed to match Machine spec") + } + if !matches { + mismatchReasons = append(mismatchReasons, reason) + } + + if reason, matches := matchesTemplateClonedFrom(infraConfigs, kcp, machine); !matches { + mismatchReasons = append(mismatchReasons, reason) + } + + if len(mismatchReasons) > 0 { + return strings.Join(mismatchReasons, ","), false, nil + } + + return "", true, nil } -// NeedsRollout returns a filter to determine if a machine needs rollout. -func NeedsRollout(reconciliationTime, rolloutAfter *metav1.Time, rolloutBefore *controlplanev1.RolloutBefore, infraConfigs map[string]*unstructured.Unstructured, machineConfigs map[string]*bootstrapv1.KubeadmConfig, kcp *controlplanev1.KubeadmControlPlane) func(machine *clusterv1.Machine) bool { - return collections.Or( - // Machines whose certificates are about to expire. - collections.ShouldRolloutBefore(reconciliationTime, rolloutBefore), - // Machines that are scheduled for rollout (KCP.Spec.RolloutAfter set, the RolloutAfter deadline is expired, and the machine was created before the deadline). - collections.ShouldRolloutAfter(reconciliationTime, rolloutAfter), - // Machines that do not match with KCP config. - collections.Not(MatchesMachineSpec(infraConfigs, machineConfigs, kcp)), - ) +// NeedsRollout checks if a Machine needs to be rolled out and returns the reason why. +func NeedsRollout(reconciliationTime, rolloutAfter *metav1.Time, rolloutBefore *controlplanev1.RolloutBefore, infraConfigs map[string]*unstructured.Unstructured, machineConfigs map[string]*bootstrapv1.KubeadmConfig, kcp *controlplanev1.KubeadmControlPlane, machine *clusterv1.Machine) (string, bool, error) { + rolloutReasons := []string{} + + // Machines whose certificates are about to expire. + if collections.ShouldRolloutBefore(reconciliationTime, rolloutBefore)(machine) { + rolloutReasons = append(rolloutReasons, "certificates will expire soon, rolloutBefore expired") + } + + // Machines that are scheduled for rollout (KCP.Spec.RolloutAfter set, + // the RolloutAfter deadline is expired, and the machine was created before the deadline). + if collections.ShouldRolloutAfter(reconciliationTime, rolloutAfter)(machine) { + rolloutReasons = append(rolloutReasons, "rolloutAfter expired") + } + + // Machines that do not match with KCP config. 
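A worked example of the message NeedsRollout assembles at the end of this function (derived from the Sprintf and strings.Join calls; names and versions are illustrative): for a Machine named cp-1 that has passed its rolloutAfter deadline and still runs v1.26.2 against a KCP at v1.27.0, the joined reasons come out as

	Machine cp-1 needs rollout: rolloutAfter expired,Machine version "v1.26.2" is not equal to KCP version "v1.27.0"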
+ mismatchReason, matches, err := matchesMachineSpec(infraConfigs, machineConfigs, kcp, machine) + if err != nil { + return "", false, errors.Wrapf(err, "failed to determine if Machine %s needs rollout", machine.Name) + } + if !matches { + rolloutReasons = append(rolloutReasons, mismatchReason) + } + + if len(rolloutReasons) > 0 { + return fmt.Sprintf("Machine %s needs rollout: %s", machine.Name, strings.Join(rolloutReasons, ",")), true, nil + } + + return "", false, nil } -// MatchesTemplateClonedFrom returns a filter to find all machines that have a corresponding infrastructure machine that -// matches a given KCP infra template. +// matchesTemplateClonedFrom checks if a Machine has a corresponding infrastructure machine that +// matches a given KCP infra template and if it doesn't match returns the reason why. // Note: Differences to the labels and annotations on the infrastructure machine are not considered for matching // criteria, because changes to labels and annotations are propagated in-place to the infrastructure machines. // TODO: This function will be renamed in a follow-up PR to something better. (ex: MatchesInfraMachine). -func MatchesTemplateClonedFrom(infraConfigs map[string]*unstructured.Unstructured, kcp *controlplanev1.KubeadmControlPlane) collections.Func { - return func(machine *clusterv1.Machine) bool { - if machine == nil { - return false - } - infraObj, found := infraConfigs[machine.Name] - if !found { - // Return true here because failing to get infrastructure machine should not be considered as unmatching. - return true - } - - clonedFromName, ok1 := infraObj.GetAnnotations()[clusterv1.TemplateClonedFromNameAnnotation] - clonedFromGroupKind, ok2 := infraObj.GetAnnotations()[clusterv1.TemplateClonedFromGroupKindAnnotation] - if !ok1 || !ok2 { - // All kcp cloned infra machines should have this annotation. - // Missing the annotation may be due to older version machines or adopted machines. - // Should not be considered as mismatch. - return true - } +func matchesTemplateClonedFrom(infraConfigs map[string]*unstructured.Unstructured, kcp *controlplanev1.KubeadmControlPlane, machine *clusterv1.Machine) (string, bool) { + if machine == nil { + return "Machine cannot be compared with KCP.spec.machineTemplate.infrastructureRef: Machine is nil", false + } + infraObj, found := infraConfigs[machine.Name] + if !found { + // Return true here because failing to get infrastructure machine should not be considered as unmatching. + return "", true + } - // Check if the machine's infrastructure reference has been created from the current KCP infrastructure template. - if clonedFromName != kcp.Spec.MachineTemplate.InfrastructureRef.Name || - clonedFromGroupKind != kcp.Spec.MachineTemplate.InfrastructureRef.GroupVersionKind().GroupKind().String() { - return false - } + clonedFromName, ok1 := infraObj.GetAnnotations()[clusterv1.TemplateClonedFromNameAnnotation] + clonedFromGroupKind, ok2 := infraObj.GetAnnotations()[clusterv1.TemplateClonedFromGroupKindAnnotation] + if !ok1 || !ok2 { + // All kcp cloned infra machines should have this annotation. + // Missing the annotation may be due to older version machines or adopted machines. + // Should not be considered as mismatch. + return "", true + } - return true + // Check if the machine's infrastructure reference has been created from the current KCP infrastructure template. 
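For context on the comparison below: the two annotations are stamped onto the infra machine when KCP clones its infrastructure template, roughly as follows (a sketch assumed from the annotation names; the cloning helper itself is not part of this patch):

	infraMachine.SetAnnotations(map[string]string{
		clusterv1.TemplateClonedFromNameAnnotation:      kcp.Spec.MachineTemplate.InfrastructureRef.Name,
		clusterv1.TemplateClonedFromGroupKindAnnotation: kcp.Spec.MachineTemplate.InfrastructureRef.GroupVersionKind().GroupKind().String(),
	})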
+ if clonedFromName != kcp.Spec.MachineTemplate.InfrastructureRef.Name || + clonedFromGroupKind != kcp.Spec.MachineTemplate.InfrastructureRef.GroupVersionKind().GroupKind().String() { + return fmt.Sprintf("Infrastructure template on KCP rotated from %s %s to %s %s", + clonedFromGroupKind, clonedFromName, + kcp.Spec.MachineTemplate.InfrastructureRef.GroupVersionKind().GroupKind().String(), kcp.Spec.MachineTemplate.InfrastructureRef.Name), false } + + return "", true } -// MatchesKubeadmBootstrapConfig checks if machine's KubeadmConfigSpec is equivalent with KCP's KubeadmConfigSpec. +// matchesKubeadmBootstrapConfig checks if machine's KubeadmConfigSpec is equivalent with KCP's KubeadmConfigSpec. // Note: Differences to the labels and annotations on the KubeadmConfig are not considered for matching // criteria, because changes to labels and annotations are propagated in-place to KubeadmConfig. -func MatchesKubeadmBootstrapConfig(machineConfigs map[string]*bootstrapv1.KubeadmConfig, kcp *controlplanev1.KubeadmControlPlane) collections.Func { - return func(machine *clusterv1.Machine) bool { - if machine == nil { - return false - } +func matchesKubeadmBootstrapConfig(machineConfigs map[string]*bootstrapv1.KubeadmConfig, kcp *controlplanev1.KubeadmControlPlane, machine *clusterv1.Machine) (string, bool, error) { + if machine == nil { + return "Machine KubeadmConfig cannot be compared: Machine is nil", false, nil + } - // Check if KCP and machine ClusterConfiguration matches, if not return - if match := matchClusterConfiguration(kcp, machine); !match { - return false - } + // Check if KCP and machine ClusterConfiguration matches, if not return + match, diff, err := matchClusterConfiguration(kcp, machine) + if err != nil { + return "", false, errors.Wrapf(err, "failed to match KubeadmConfig") + } + if !match { + return fmt.Sprintf("Machine KubeadmConfig ClusterConfiguration is outdated: diff: %s", diff), false, nil + } - bootstrapRef := machine.Spec.Bootstrap.ConfigRef - if bootstrapRef == nil { - // Missing bootstrap reference should not be considered as unmatching. - // This is a safety precaution to avoid selecting machines that are broken, which in the future should be remediated separately. - return true - } + bootstrapRef := machine.Spec.Bootstrap.ConfigRef + if bootstrapRef == nil { + // Missing bootstrap reference should not be considered as unmatching. + // This is a safety precaution to avoid selecting machines that are broken, which in the future should be remediated separately. + return "", true, nil + } - machineConfig, found := machineConfigs[machine.Name] - if !found { - // Return true here because failing to get KubeadmConfig should not be considered as unmatching. - // This is a safety precaution to avoid rolling out machines if the client or the api-server is misbehaving. - return true - } + machineConfig, found := machineConfigs[machine.Name] + if !found { + // Return true here because failing to get KubeadmConfig should not be considered as unmatching. + // This is a safety precaution to avoid rolling out machines if the client or the api-server is misbehaving. + return "", true, nil + } - // Check if KCP and machine InitConfiguration or JoinConfiguration matches - // NOTE: only one between init configuration and join configuration is set on a machine, depending - // on the fact that the machine was the initial control plane node or a joining control plane node. 
- return matchInitOrJoinConfiguration(machineConfig, kcp) + // Check if KCP and machine InitConfiguration or JoinConfiguration matches + // NOTE: only one between init configuration and join configuration is set on a machine, depending + // on the fact that the machine was the initial control plane node or a joining control plane node. + match, diff, err = matchInitOrJoinConfiguration(machineConfig, kcp) + if err != nil { + return "", false, errors.Wrapf(err, "failed to match KubeadmConfig") + } + if !match { + return fmt.Sprintf("Machine KubeadmConfig InitConfiguration or JoinConfiguration are outdated: diff: %s", diff), false, nil } + + return "", true, nil } // matchClusterConfiguration verifies if KCP and machine ClusterConfiguration matches. @@ -131,11 +187,11 @@ func MatchesKubeadmBootstrapConfig(machineConfigs map[string]*bootstrapv1.Kubead // If the annotation is not present (machine is either old or adopted), we won't roll out on any possible changes // made in KCP's ClusterConfiguration given that we don't have enough information to make a decision. // Users should use KCP.Spec.RolloutAfter field to force a rollout in this case. -func matchClusterConfiguration(kcp *controlplanev1.KubeadmControlPlane, machine *clusterv1.Machine) bool { +func matchClusterConfiguration(kcp *controlplanev1.KubeadmControlPlane, machine *clusterv1.Machine) (bool, string, error) { machineClusterConfigStr, ok := machine.GetAnnotations()[controlplanev1.KubeadmClusterConfigurationAnnotation] if !ok { // We don't have enough information to make a decision; don't trigger a roll out. - return true + return true, "", nil } machineClusterConfig := &bootstrapv1.ClusterConfiguration{} @@ -144,29 +200,37 @@ func matchClusterConfiguration(kcp *controlplanev1.KubeadmControlPlane, machine // otherwise we won't be able to handle a nil ClusterConfiguration (that is serialized into "null"). // See https://github.com/kubernetes-sigs/cluster-api/issues/3353. if err := json.Unmarshal([]byte(machineClusterConfigStr), &machineClusterConfig); err != nil { - return false + return false, "", nil //nolint:nilerr // Intentionally not returning the error here } // If any of the compared values are nil, treat them the same as an empty ClusterConfiguration. if machineClusterConfig == nil { machineClusterConfig = &bootstrapv1.ClusterConfiguration{} } + kcpLocalClusterConfiguration := kcp.Spec.KubeadmConfigSpec.ClusterConfiguration if kcpLocalClusterConfiguration == nil { kcpLocalClusterConfiguration = &bootstrapv1.ClusterConfiguration{} } + // Skip checking DNS fields because we can update the configuration of the working cluster in place. + machineClusterConfig.DNS = kcpLocalClusterConfiguration.DNS + // Compare and return. - return reflect.DeepEqual(machineClusterConfig, kcpLocalClusterConfiguration) + match, diff, err := compare.Diff(machineClusterConfig, kcpLocalClusterConfiguration) + if err != nil { + return false, "", errors.Wrapf(err, "failed to match ClusterConfiguration") + } + return match, diff, nil } // matchInitOrJoinConfiguration verifies if KCP and machine InitConfiguration or JoinConfiguration matches. // NOTE: By extension this method takes care of detecting changes in other fields of the KubeadmConfig configuration (e.g. Files, Mounts etc.)
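The new compare.Diff helper (sigs.k8s.io/cluster-api/internal/util/compare) is not included in this patch; a minimal stand-in with the same (match, diff, error) shape could be built on go-cmp, assuming that is roughly what the helper does:

	package compare

	import (
		"github.com/google/go-cmp/cmp"
		"github.com/pkg/errors"
	)

	// Diff reports whether x and y are equal and, if not, a human-readable diff.
	// cmp.Diff panics on unexported fields, so the panic is converted to an error.
	func Diff(x, y interface{}) (match bool, diff string, err error) {
		defer func() {
			if r := recover(); r != nil {
				err = errors.Errorf("failed to diff objects: %v", r)
			}
		}()
		diff = cmp.Diff(x, y)
		return diff == "", diff, nil
	}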
-func matchInitOrJoinConfiguration(machineConfig *bootstrapv1.KubeadmConfig, kcp *controlplanev1.KubeadmControlPlane) bool { +func matchInitOrJoinConfiguration(machineConfig *bootstrapv1.KubeadmConfig, kcp *controlplanev1.KubeadmControlPlane) (bool, string, error) { if machineConfig == nil { // Return true here because failing to get KubeadmConfig should not be considered as unmatching. // This is a safety precaution to avoid rolling out machines if the client or the api-server is misbehaving. - return true + return true, "", nil } // takes the KubeadmConfigSpec from KCP and applies the transformations required @@ -177,13 +241,17 @@ func matchInitOrJoinConfiguration(machineConfig *bootstrapv1.KubeadmConfig, kcp // *Note* This assumes that newly added default values never // introduce a semantic difference to the unset value. // But that is something that is ensured by our API guarantees. - bootstrapv1.DefaultKubeadmConfigSpec(kcpConfig) - bootstrapv1.DefaultKubeadmConfigSpec(&machineConfig.Spec) + kcpConfig.Default() + machineConfig.Spec.Default() // cleanups all the fields that are not relevant for the comparison. cleanupConfigFields(kcpConfig, machineConfig) - return reflect.DeepEqual(&machineConfig.Spec, kcpConfig) + match, diff, err := compare.Diff(&machineConfig.Spec, kcpConfig) + if err != nil { + return false, "", errors.Wrapf(err, "failed to match InitConfiguration or JoinConfiguration") + } + return match, diff, nil } // getAdjustedKcpConfig takes the KubeadmConfigSpec from KCP and applies the transformations required diff --git a/controlplane/kubeadm/internal/filters_test.go b/controlplane/kubeadm/internal/filters_test.go index 79211ccd3ee9..a496d5e1857f 100644 --- a/controlplane/kubeadm/internal/filters_test.go +++ b/controlplane/kubeadm/internal/filters_test.go @@ -34,9 +34,12 @@ func TestMatchClusterConfiguration(t *testing.T) { g := NewWithT(t) kcp := &controlplanev1.KubeadmControlPlane{} m := &clusterv1.Machine{} - g.Expect(matchClusterConfiguration(kcp, m)).To(BeTrue()) + match, diff, err := matchClusterConfiguration(kcp, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeTrue()) + g.Expect(diff).To(BeEmpty()) }) - t.Run("machine without an invalid ClusterConfiguration annotation should not match (only solution is to rollout)", func(t *testing.T) { + t.Run("machine with an invalid ClusterConfiguration annotation should not match (only solution is to rollout)", func(t *testing.T) { g := NewWithT(t) kcp := &controlplanev1.KubeadmControlPlane{} m := &clusterv1.Machine{ @@ -46,7 +49,10 @@ func TestMatchClusterConfiguration(t *testing.T) { }, }, } - g.Expect(matchClusterConfiguration(kcp, m)).To(BeFalse()) + match, diff, err := matchClusterConfiguration(kcp, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeFalse()) + g.Expect(diff).To(BeEmpty()) }) t.Run("Return true if cluster configuration matches", func(t *testing.T) { g := NewWithT(t) @@ -66,7 +72,10 @@ func TestMatchClusterConfiguration(t *testing.T) { }, }, } - g.Expect(matchClusterConfiguration(kcp, m)).To(BeTrue()) + match, diff, err := matchClusterConfiguration(kcp, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeTrue()) + g.Expect(diff).To(BeEmpty()) }) t.Run("Return false if cluster configuration does not match", func(t *testing.T) { g := NewWithT(t) @@ -86,7 +95,16 @@ func TestMatchClusterConfiguration(t *testing.T) { }, }, } - g.Expect(matchClusterConfiguration(kcp, m)).To(BeFalse()) + match, diff, err := matchClusterConfiguration(kcp, m) + 
g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeFalse()) + g.Expect(diff).To(BeComparableTo(`&v1beta1.ClusterConfiguration{ + ... // 10 identical fields + ImageRepository: "", + FeatureGates: nil, +- ClusterName: "bar", ++ ClusterName: "foo", + }`)) }) t.Run("Return true if cluster configuration is nil (special case)", func(t *testing.T) { g := NewWithT(t) @@ -102,7 +120,38 @@ func TestMatchClusterConfiguration(t *testing.T) { }, }, } - g.Expect(matchClusterConfiguration(kcp, m)).To(BeTrue()) + match, diff, err := matchClusterConfiguration(kcp, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeTrue()) + g.Expect(diff).To(BeEmpty()) + }) + t.Run("Return true although the DNS fields are different", func(t *testing.T) { + g := NewWithT(t) + kcp := &controlplanev1.KubeadmControlPlane{ + Spec: controlplanev1.KubeadmControlPlaneSpec{ + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ + DNS: bootstrapv1.DNS{ + ImageMeta: bootstrapv1.ImageMeta{ + ImageTag: "v1.10.1", + ImageRepository: "gcr.io/capi-test", + }, + }, + }, + }, + }, + } + m := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + controlplanev1.KubeadmClusterConfigurationAnnotation: "{\"dns\":{\"imageRepository\":\"gcr.io/capi-test\",\"imageTag\":\"v1.9.3\"}}", + }, + }, + } + match, diff, err := matchClusterConfiguration(kcp, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeTrue()) + g.Expect(diff).To(BeEmpty()) }) } @@ -191,8 +240,8 @@ func TestCleanupConfigFields(t *testing.T) { }, } cleanupConfigFields(kcpConfig, machineConfig) - g.Expect(kcpConfig.JoinConfiguration.Discovery).To(Equal(bootstrapv1.Discovery{})) - g.Expect(machineConfig.Spec.JoinConfiguration.Discovery).To(Equal(bootstrapv1.Discovery{})) + g.Expect(kcpConfig.JoinConfiguration.Discovery).To(BeComparableTo(bootstrapv1.Discovery{})) + g.Expect(machineConfig.Spec.JoinConfiguration.Discovery).To(BeComparableTo(bootstrapv1.Discovery{})) }) t.Run("JoinConfiguration.ControlPlane gets removed from MachineConfig if it was not derived by KCPConfig", func(t *testing.T) { g := NewWithT(t) @@ -228,7 +277,7 @@ func TestCleanupConfigFields(t *testing.T) { } cleanupConfigFields(kcpConfig, machineConfig) g.Expect(kcpConfig.JoinConfiguration).ToNot(BeNil()) - g.Expect(machineConfig.Spec.JoinConfiguration.NodeRegistration).To(Equal(bootstrapv1.NodeRegistrationOptions{})) + g.Expect(machineConfig.Spec.JoinConfiguration.NodeRegistration).To(BeComparableTo(bootstrapv1.NodeRegistrationOptions{})) }) t.Run("InitConfiguration.TypeMeta gets removed from MachineConfig", func(t *testing.T) { g := NewWithT(t) @@ -247,7 +296,7 @@ func TestCleanupConfigFields(t *testing.T) { } cleanupConfigFields(kcpConfig, machineConfig) g.Expect(kcpConfig.InitConfiguration).ToNot(BeNil()) - g.Expect(machineConfig.Spec.InitConfiguration.TypeMeta).To(Equal(metav1.TypeMeta{})) + g.Expect(machineConfig.Spec.InitConfiguration.TypeMeta).To(BeComparableTo(metav1.TypeMeta{})) }) t.Run("JoinConfiguration.TypeMeta gets removed from MachineConfig", func(t *testing.T) { g := NewWithT(t) @@ -266,7 +315,7 @@ func TestCleanupConfigFields(t *testing.T) { } cleanupConfigFields(kcpConfig, machineConfig) g.Expect(kcpConfig.JoinConfiguration).ToNot(BeNil()) - g.Expect(machineConfig.Spec.JoinConfiguration.TypeMeta).To(Equal(metav1.TypeMeta{})) + g.Expect(machineConfig.Spec.JoinConfiguration.TypeMeta).To(BeComparableTo(metav1.TypeMeta{})) }) } @@ -274,12 +323,10 @@ func 
TestMatchInitOrJoinConfiguration(t *testing.T) { t.Run("returns true if the machine does not have a bootstrap config", func(t *testing.T) { g := NewWithT(t) kcp := &controlplanev1.KubeadmControlPlane{} - g.Expect(matchInitOrJoinConfiguration(nil, kcp)).To(BeTrue()) - }) - t.Run("returns true if the there are problems reading the bootstrap config", func(t *testing.T) { - g := NewWithT(t) - kcp := &controlplanev1.KubeadmControlPlane{} - g.Expect(matchInitOrJoinConfiguration(nil, kcp)).To(BeTrue()) + match, diff, err := matchInitOrJoinConfiguration(nil, kcp) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeTrue()) + g.Expect(diff).To(BeEmpty()) }) t.Run("returns true if one format is empty and the other one cloud-config", func(t *testing.T) { g := NewWithT(t) @@ -325,7 +372,10 @@ func TestMatchInitOrJoinConfiguration(t *testing.T) { }, }, } - g.Expect(matchInitOrJoinConfiguration(machineConfigs[m.Name], kcp)).To(BeTrue()) + match, diff, err := matchInitOrJoinConfiguration(machineConfigs[m.Name], kcp) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeTrue()) + g.Expect(diff).To(BeEmpty()) }) t.Run("returns true if InitConfiguration is equal", func(t *testing.T) { g := NewWithT(t) @@ -373,7 +423,10 @@ func TestMatchInitOrJoinConfiguration(t *testing.T) { }, }, } - g.Expect(matchInitOrJoinConfiguration(machineConfigs[m.Name], kcp)).To(BeTrue()) + match, diff, err := matchInitOrJoinConfiguration(machineConfigs[m.Name], kcp) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeTrue()) + g.Expect(diff).To(BeEmpty()) }) t.Run("returns false if InitConfiguration is NOT equal", func(t *testing.T) { g := NewWithT(t) @@ -425,7 +478,29 @@ func TestMatchInitOrJoinConfiguration(t *testing.T) { }, }, } - g.Expect(matchInitOrJoinConfiguration(machineConfigs[m.Name], kcp)).To(BeFalse()) + match, diff, err := matchInitOrJoinConfiguration(machineConfigs[m.Name], kcp) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeFalse()) + g.Expect(diff).To(BeComparableTo(`&v1beta1.KubeadmConfigSpec{ + ClusterConfiguration: nil, + InitConfiguration: &v1beta1.InitConfiguration{ + TypeMeta: {}, + BootstrapTokens: nil, + NodeRegistration: v1beta1.NodeRegistrationOptions{ +- Name: "", ++ Name: "A new name", + CRISocket: "", + Taints: nil, + ... // 3 identical fields + }, + LocalAPIEndpoint: {}, + SkipPhases: nil, + Patches: nil, + }, + JoinConfiguration: nil, + Files: nil, + ... 
// 10 identical fields + }`)) }) t.Run("returns true if JoinConfiguration is equal", func(t *testing.T) { g := NewWithT(t) @@ -473,7 +548,10 @@ func TestMatchInitOrJoinConfiguration(t *testing.T) { }, }, } - g.Expect(matchInitOrJoinConfiguration(machineConfigs[m.Name], kcp)).To(BeTrue()) + match, diff, err := matchInitOrJoinConfiguration(machineConfigs[m.Name], kcp) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeTrue()) + g.Expect(diff).To(BeEmpty()) }) t.Run("returns false if JoinConfiguration is NOT equal", func(t *testing.T) { g := NewWithT(t) @@ -525,7 +603,29 @@ func TestMatchInitOrJoinConfiguration(t *testing.T) { }, }, } - g.Expect(matchInitOrJoinConfiguration(machineConfigs[m.Name], kcp)).To(BeFalse()) + match, diff, err := matchInitOrJoinConfiguration(machineConfigs[m.Name], kcp) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeFalse()) + g.Expect(diff).To(BeComparableTo(`&v1beta1.KubeadmConfigSpec{ + ClusterConfiguration: nil, + InitConfiguration: nil, + JoinConfiguration: &v1beta1.JoinConfiguration{ + TypeMeta: {}, + NodeRegistration: v1beta1.NodeRegistrationOptions{ +- Name: "", ++ Name: "A new name", + CRISocket: "", + Taints: nil, + ... // 3 identical fields + }, + CACertPath: "", + Discovery: {}, + ... // 3 identical fields + }, + Files: nil, + DiskSetup: nil, + ... // 9 identical fields + }`)) }) t.Run("returns false if some other configurations are not equal", func(t *testing.T) { g := NewWithT(t) @@ -574,7 +674,19 @@ func TestMatchInitOrJoinConfiguration(t *testing.T) { }, }, } - g.Expect(matchInitOrJoinConfiguration(machineConfigs[m.Name], kcp)).To(BeFalse()) + match, diff, err := matchInitOrJoinConfiguration(machineConfigs[m.Name], kcp) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeFalse()) + g.Expect(diff).To(BeComparableTo(`&v1beta1.KubeadmConfigSpec{ + ClusterConfiguration: nil, + InitConfiguration: &{NodeRegistration: {ImagePullPolicy: "IfNotPresent"}}, + JoinConfiguration: nil, +- Files: nil, ++ Files: []v1beta1.File{}, + DiskSetup: nil, + Mounts: nil, + ... // 8 identical fields + }`)) }) } @@ -600,8 +712,10 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ m.Name: {}, } - f := MatchesKubeadmBootstrapConfig(machineConfigs, kcp) - g.Expect(f(m)).To(BeTrue()) + reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeTrue()) + g.Expect(reason).To(BeEmpty()) }) t.Run("returns false if ClusterConfiguration is NOT equal", func(t *testing.T) { g := NewWithT(t) @@ -624,8 +738,16 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ m.Name: {}, } - f := MatchesKubeadmBootstrapConfig(machineConfigs, kcp) - g.Expect(f(m)).To(BeFalse()) + reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeFalse()) + g.Expect(reason).To(BeComparableTo(`Machine KubeadmConfig ClusterConfiguration is outdated: diff: &v1beta1.ClusterConfiguration{ + ... 
// 10 identical fields + ImageRepository: "", + FeatureGates: nil, +- ClusterName: "bar", ++ ClusterName: "foo", + }`)) }) t.Run("returns true if InitConfiguration is equal", func(t *testing.T) { g := NewWithT(t) @@ -673,8 +795,10 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { }, }, } - f := MatchesKubeadmBootstrapConfig(machineConfigs, kcp) - g.Expect(f(m)).To(BeTrue()) + reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeTrue()) + g.Expect(reason).To(BeEmpty()) }) t.Run("returns false if InitConfiguration is NOT equal", func(t *testing.T) { g := NewWithT(t) @@ -726,8 +850,29 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { }, }, } - f := MatchesKubeadmBootstrapConfig(machineConfigs, kcp) - g.Expect(f(m)).To(BeFalse()) + reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeFalse()) + g.Expect(reason).To(BeComparableTo(`Machine KubeadmConfig InitConfiguration or JoinConfiguration are outdated: diff: &v1beta1.KubeadmConfigSpec{ + ClusterConfiguration: nil, + InitConfiguration: &v1beta1.InitConfiguration{ + TypeMeta: {}, + BootstrapTokens: nil, + NodeRegistration: v1beta1.NodeRegistrationOptions{ +- Name: "", ++ Name: "foo", + CRISocket: "", + Taints: nil, + ... // 3 identical fields + }, + LocalAPIEndpoint: {}, + SkipPhases: nil, + Patches: nil, + }, + JoinConfiguration: nil, + Files: nil, + ... // 10 identical fields + }`)) }) t.Run("returns true if JoinConfiguration is equal", func(t *testing.T) { g := NewWithT(t) @@ -775,8 +920,10 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { }, }, } - f := MatchesKubeadmBootstrapConfig(machineConfigs, kcp) - g.Expect(f(m)).To(BeTrue()) + reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeTrue()) + g.Expect(reason).To(BeEmpty()) }) t.Run("returns false if JoinConfiguration is NOT equal", func(t *testing.T) { g := NewWithT(t) @@ -828,8 +975,29 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { }, }, } - f := MatchesKubeadmBootstrapConfig(machineConfigs, kcp) - g.Expect(f(m)).To(BeFalse()) + reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeFalse()) + g.Expect(reason).To(BeComparableTo(`Machine KubeadmConfig InitConfiguration or JoinConfiguration are outdated: diff: &v1beta1.KubeadmConfigSpec{ + ClusterConfiguration: nil, + InitConfiguration: nil, + JoinConfiguration: &v1beta1.JoinConfiguration{ + TypeMeta: {}, + NodeRegistration: v1beta1.NodeRegistrationOptions{ +- Name: "", ++ Name: "foo", + CRISocket: "", + Taints: nil, + ... // 3 identical fields + }, + CACertPath: "", + Discovery: {}, + ... // 3 identical fields + }, + Files: nil, + DiskSetup: nil, + ... 
// 9 identical fields + }`)) }) t.Run("returns false if some other configurations are not equal", func(t *testing.T) { g := NewWithT(t) @@ -878,8 +1046,19 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { }, }, } - f := MatchesKubeadmBootstrapConfig(machineConfigs, kcp) - g.Expect(f(m)).To(BeFalse()) + reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeFalse()) + g.Expect(reason).To(BeComparableTo(`Machine KubeadmConfig InitConfiguration or JoinConfiguration are outdated: diff: &v1beta1.KubeadmConfigSpec{ + ClusterConfiguration: nil, + InitConfiguration: &{NodeRegistration: {ImagePullPolicy: "IfNotPresent"}}, + JoinConfiguration: nil, +- Files: nil, ++ Files: []v1beta1.File{}, + DiskSetup: nil, + Mounts: nil, + ... // 8 identical fields + }`)) }) t.Run("should match on labels and annotations", func(t *testing.T) { kcp := &controlplanev1.KubeadmControlPlane{ @@ -941,32 +1120,40 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { g := NewWithT(t) machineConfigs[m.Name].Annotations = nil machineConfigs[m.Name].Labels = nil - f := MatchesKubeadmBootstrapConfig(machineConfigs, kcp) - g.Expect(f(m)).To(BeTrue()) + reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeTrue()) + g.Expect(reason).To(BeEmpty()) }) t.Run("by returning true if only labels don't match", func(t *testing.T) { g := NewWithT(t) machineConfigs[m.Name].Annotations = kcp.Spec.MachineTemplate.ObjectMeta.Annotations machineConfigs[m.Name].Labels = nil - f := MatchesKubeadmBootstrapConfig(machineConfigs, kcp) - g.Expect(f(m)).To(BeTrue()) + reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeTrue()) + g.Expect(reason).To(BeEmpty()) }) t.Run("by returning true if only annotations don't match", func(t *testing.T) { g := NewWithT(t) machineConfigs[m.Name].Annotations = nil machineConfigs[m.Name].Labels = kcp.Spec.MachineTemplate.ObjectMeta.Labels - f := MatchesKubeadmBootstrapConfig(machineConfigs, kcp) - g.Expect(f(m)).To(BeTrue()) + reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeTrue()) + g.Expect(reason).To(BeEmpty()) }) t.Run("by returning true if both labels and annotations match", func(t *testing.T) { g := NewWithT(t) machineConfigs[m.Name].Labels = kcp.Spec.MachineTemplate.ObjectMeta.Labels machineConfigs[m.Name].Annotations = kcp.Spec.MachineTemplate.ObjectMeta.Annotations - f := MatchesKubeadmBootstrapConfig(machineConfigs, kcp) - g.Expect(f(m)).To(BeTrue()) + reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeTrue()) + g.Expect(reason).To(BeEmpty()) }) }) } @@ -974,9 +1161,9 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { func TestMatchesTemplateClonedFrom(t *testing.T) { t.Run("nil machine returns false", func(t *testing.T) { g := NewWithT(t) - g.Expect( - MatchesTemplateClonedFrom(nil, nil)(nil), - ).To(BeFalse()) + reason, match := matchesTemplateClonedFrom(nil, nil, nil) + g.Expect(match).To(BeFalse()) + g.Expect(reason).To(Equal("Machine cannot be compared with KCP.spec.machineTemplate.infrastructureRef: Machine is nil")) }) t.Run("returns true if machine not found", func(t *testing.T) { @@ -992,9 +1179,9 @@ func TestMatchesTemplateClonedFrom(t *testing.T) { }, }, } - 
g.Expect( - MatchesTemplateClonedFrom(map[string]*unstructured.Unstructured{}, kcp)(machine), - ).To(BeTrue()) + reason, match := matchesTemplateClonedFrom(map[string]*unstructured.Unstructured{}, kcp, machine) + g.Expect(match).To(BeTrue()) + g.Expect(reason).To(BeEmpty()) }) t.Run("matches labels or annotations", func(t *testing.T) { @@ -1052,8 +1239,9 @@ func TestMatchesTemplateClonedFrom(t *testing.T) { clusterv1.TemplateClonedFromGroupKindAnnotation: "GenericMachineTemplate.generic.io", }) infraConfigs[m.Name].SetLabels(nil) - f := MatchesTemplateClonedFrom(infraConfigs, kcp) - g.Expect(f(m)).To(BeTrue()) + reason, match := matchesTemplateClonedFrom(infraConfigs, kcp, m) + g.Expect(match).To(BeTrue()) + g.Expect(reason).To(BeEmpty()) }) t.Run("by returning true if only labels don't match", func(t *testing.T) { @@ -1064,8 +1252,9 @@ func TestMatchesTemplateClonedFrom(t *testing.T) { "test": "annotation", }) infraConfigs[m.Name].SetLabels(nil) - f := MatchesTemplateClonedFrom(infraConfigs, kcp) - g.Expect(f(m)).To(BeTrue()) + reason, match := matchesTemplateClonedFrom(infraConfigs, kcp, m) + g.Expect(match).To(BeTrue()) + g.Expect(reason).To(BeEmpty()) }) t.Run("by returning true if only annotations don't match", func(t *testing.T) { @@ -1075,8 +1264,9 @@ func TestMatchesTemplateClonedFrom(t *testing.T) { clusterv1.TemplateClonedFromGroupKindAnnotation: "GenericMachineTemplate.generic.io", }) infraConfigs[m.Name].SetLabels(kcp.Spec.MachineTemplate.ObjectMeta.Labels) - f := MatchesTemplateClonedFrom(infraConfigs, kcp) - g.Expect(f(m)).To(BeTrue()) + reason, match := matchesTemplateClonedFrom(infraConfigs, kcp, m) + g.Expect(match).To(BeTrue()) + g.Expect(reason).To(BeEmpty()) }) t.Run("by returning true if both labels and annotations match", func(t *testing.T) { @@ -1087,8 +1277,9 @@ func TestMatchesTemplateClonedFrom(t *testing.T) { "test": "annotation", }) infraConfigs[m.Name].SetLabels(kcp.Spec.MachineTemplate.ObjectMeta.Labels) - f := MatchesTemplateClonedFrom(infraConfigs, kcp) - g.Expect(f(m)).To(BeTrue()) + reason, match := matchesTemplateClonedFrom(infraConfigs, kcp, m) + g.Expect(match).To(BeTrue()) + g.Expect(reason).To(BeEmpty()) }) }) } @@ -1120,9 +1311,10 @@ func TestMatchesTemplateClonedFrom_WithClonedFromAnnotations(t *testing.T) { }, } tests := []struct { - name string - annotations map[string]interface{} - expectMatch bool + name string + annotations map[string]interface{} + expectMatch bool + expectReason string }{ { name: "returns true if annotations don't exist", @@ -1135,7 +1327,8 @@ func TestMatchesTemplateClonedFrom_WithClonedFromAnnotations(t *testing.T) { clusterv1.TemplateClonedFromNameAnnotation: "barfoo1", clusterv1.TemplateClonedFromGroupKindAnnotation: "barfoo2", }, - expectMatch: false, + expectMatch: false, + expectReason: "Infrastructure template on KCP rotated from barfoo2 barfoo1 to GenericMachineTemplate.generic.io infra-foo", }, { name: "returns false if TemplateClonedFromNameAnnotation matches but TemplateClonedFromGroupKindAnnotation doesn't", @@ -1143,7 +1336,8 @@ func TestMatchesTemplateClonedFrom_WithClonedFromAnnotations(t *testing.T) { clusterv1.TemplateClonedFromNameAnnotation: "infra-foo", clusterv1.TemplateClonedFromGroupKindAnnotation: "barfoo2", }, - expectMatch: false, + expectMatch: false, + expectReason: "Infrastructure template on KCP rotated from barfoo2 infra-foo to GenericMachineTemplate.generic.io infra-foo", }, { name: "returns true if both annotations match", @@ -1171,9 +1365,9 @@ func 
TestMatchesTemplateClonedFrom_WithClonedFromAnnotations(t *testing.T) { }, }, } - g.Expect( - MatchesTemplateClonedFrom(infraConfigs, kcp)(machine), - ).To(Equal(tt.expectMatch)) + reason, match := matchesTemplateClonedFrom(infraConfigs, kcp, machine) + g.Expect(match).To(Equal(tt.expectMatch)) + g.Expect(reason).To(Equal(tt.expectReason)) }) } } diff --git a/controlplane/kubeadm/internal/suite_test.go b/controlplane/kubeadm/internal/suite_test.go index f94ab56994e6..65e3c9a61ff1 100644 --- a/controlplane/kubeadm/internal/suite_test.go +++ b/controlplane/kubeadm/internal/suite_test.go @@ -17,22 +17,44 @@ limitations under the License. package internal import ( + "context" + "fmt" "os" "testing" + corev1 "k8s.io/api/core/v1" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/cluster-api/internal/test/envtest" ) var ( - env *envtest.Environment - ctx = ctrl.SetupSignalHandler() + env *envtest.Environment + ctx = ctrl.SetupSignalHandler() + secretCachingClient client.Client ) func TestMain(m *testing.M) { + setupReconcilers := func(_ context.Context, mgr ctrl.Manager) { + var err error + secretCachingClient, err = client.New(mgr.GetConfig(), client.Options{ + HTTPClient: mgr.GetHTTPClient(), + Cache: &client.CacheOptions{ + Reader: mgr.GetCache(), + }, + }) + if err != nil { + panic(fmt.Sprintf("unable to create secretCachingClient: %v", err)) + } + } os.Exit(envtest.Run(ctx, envtest.RunInput{ - M: m, - SetupEnv: func(e *envtest.Environment) { env = e }, + M: m, + ManagerUncachedObjs: []client.Object{ + &corev1.ConfigMap{}, + &corev1.Secret{}, + }, + SetupEnv: func(e *envtest.Environment) { env = e }, + SetupReconcilers: setupReconcilers, })) } diff --git a/controlplane/kubeadm/api/v1beta1/kubeadm_control_plane_webhook.go b/controlplane/kubeadm/internal/webhooks/kubeadm_control_plane.go similarity index 72% rename from controlplane/kubeadm/api/v1beta1/kubeadm_control_plane_webhook.go rename to controlplane/kubeadm/internal/webhooks/kubeadm_control_plane.go index 031115fb6dce..d848d4616bba 100644 --- a/controlplane/kubeadm/api/v1beta1/kubeadm_control_plane_webhook.go +++ b/controlplane/kubeadm/internal/webhooks/kubeadm_control_plane.go @@ -14,14 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +package webhooks import ( + "context" "encoding/json" "fmt" "strings" - "github.com/blang/semver" + "github.com/blang/semver/v4" "github.com/coredns/corefile-migration/migration" jsonpatch "github.com/evanphx/json-patch/v5" "github.com/pkg/errors" @@ -31,31 +32,46 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/internal/util/kubeadm" "sigs.k8s.io/cluster-api/util/container" "sigs.k8s.io/cluster-api/util/version" ) -func (in *KubeadmControlPlane) SetupWebhookWithManager(mgr ctrl.Manager) error { +func (webhook *KubeadmControlPlane) SetupWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr). - For(in). + For(&controlplanev1.KubeadmControlPlane{}). + WithDefaulter(webhook). + WithValidator(webhook). 
Complete() } // +kubebuilder:webhook:verbs=create;update,path=/mutate-controlplane-cluster-x-k8s-io-v1beta1-kubeadmcontrolplane,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=controlplane.cluster.x-k8s.io,resources=kubeadmcontrolplanes,versions=v1beta1,name=default.kubeadmcontrolplane.controlplane.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 // +kubebuilder:webhook:verbs=create;update,path=/validate-controlplane-cluster-x-k8s-io-v1beta1-kubeadmcontrolplane,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=controlplane.cluster.x-k8s.io,resources=kubeadmcontrolplanes,versions=v1beta1,name=validation.kubeadmcontrolplane.controlplane.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 -var _ webhook.Defaulter = &KubeadmControlPlane{} -var _ webhook.Validator = &KubeadmControlPlane{} +// KubeadmControlPlane implements a validation and defaulting webhook for KubeadmControlPlane. +type KubeadmControlPlane struct{} + +var _ webhook.CustomValidator = &KubeadmControlPlane{} +var _ webhook.CustomDefaulter = &KubeadmControlPlane{} // Default implements webhook.Defaulter so a webhook will be registered for the type. -func (in *KubeadmControlPlane) Default() { - defaultKubeadmControlPlaneSpec(&in.Spec, in.Namespace) +func (webhook *KubeadmControlPlane) Default(_ context.Context, obj runtime.Object) error { + k, ok := obj.(*controlplanev1.KubeadmControlPlane) + if !ok { + return apierrors.NewBadRequest(fmt.Sprintf("expected a KubeadmControlPlane but got a %T", obj)) + } + + defaultKubeadmControlPlaneSpec(&k.Spec, k.Namespace) + + return nil } -func defaultKubeadmControlPlaneSpec(s *KubeadmControlPlaneSpec, namespace string) { +func defaultKubeadmControlPlaneSpec(s *controlplanev1.KubeadmControlPlaneSpec, namespace string) { if s.Replicas == nil { replicas := int32(1) s.Replicas = &replicas @@ -69,26 +85,26 @@ func defaultKubeadmControlPlaneSpec(s *KubeadmControlPlaneSpec, namespace string s.Version = "v" + s.Version } - bootstrapv1.DefaultKubeadmConfigSpec(&s.KubeadmConfigSpec) + s.KubeadmConfigSpec.Default() s.RolloutStrategy = defaultRolloutStrategy(s.RolloutStrategy) } -func defaultRolloutStrategy(rolloutStrategy *RolloutStrategy) *RolloutStrategy { +func defaultRolloutStrategy(rolloutStrategy *controlplanev1.RolloutStrategy) *controlplanev1.RolloutStrategy { ios1 := intstr.FromInt(1) if rolloutStrategy == nil { - rolloutStrategy = &RolloutStrategy{} + rolloutStrategy = &controlplanev1.RolloutStrategy{} } // Enforce RollingUpdate strategy and default MaxSurge if not set. if rolloutStrategy != nil { if len(rolloutStrategy.Type) == 0 { - rolloutStrategy.Type = RollingUpdateStrategyType + rolloutStrategy.Type = controlplanev1.RollingUpdateStrategyType } - if rolloutStrategy.Type == RollingUpdateStrategyType { + if rolloutStrategy.Type == controlplanev1.RollingUpdateStrategyType { if rolloutStrategy.RollingUpdate == nil { - rolloutStrategy.RollingUpdate = &RollingUpdate{} + rolloutStrategy.RollingUpdate = &controlplanev1.RollingUpdate{} } rolloutStrategy.RollingUpdate.MaxSurge = intstr.ValueOrDefault(rolloutStrategy.RollingUpdate.MaxSurge, ios1) } @@ -98,15 +114,20 @@ func defaultRolloutStrategy(rolloutStrategy *RolloutStrategy) *RolloutStrategy { } // ValidateCreate implements webhook.Validator so a webhook will be registered for the type. 
-func (in *KubeadmControlPlane) ValidateCreate() error { - spec := in.Spec - allErrs := validateKubeadmControlPlaneSpec(spec, in.Namespace, field.NewPath("spec")) - allErrs = append(allErrs, validateClusterConfiguration(spec.KubeadmConfigSpec.ClusterConfiguration, nil, field.NewPath("spec", "kubeadmConfigSpec", "clusterConfiguration"))...) +func (webhook *KubeadmControlPlane) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) { + k, ok := obj.(*controlplanev1.KubeadmControlPlane) + if !ok { + return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a KubeadmControlPlane but got a %T", obj)) + } + + spec := k.Spec + allErrs := validateKubeadmControlPlaneSpec(spec, k.Namespace, field.NewPath("spec")) + allErrs = append(allErrs, validateClusterConfiguration(nil, spec.KubeadmConfigSpec.ClusterConfiguration, field.NewPath("spec", "kubeadmConfigSpec", "clusterConfiguration"))...) allErrs = append(allErrs, spec.KubeadmConfigSpec.Validate(field.NewPath("spec", "kubeadmConfigSpec"))...) if len(allErrs) > 0 { - return apierrors.NewInvalid(GroupVersion.WithKind("KubeadmControlPlane").GroupKind(), in.Name, allErrs) + return nil, apierrors.NewInvalid(clusterv1.GroupVersion.WithKind("KubeadmControlPlane").GroupKind(), k.Name, allErrs) } - return nil + return nil, nil } const ( @@ -129,37 +150,62 @@ const ( ntp = "ntp" ignition = "ignition" diskSetup = "diskSetup" + featureGates = "featureGates" ) const minimumCertificatesExpiryDays = 7 // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. -func (in *KubeadmControlPlane) ValidateUpdate(old runtime.Object) error { +func (webhook *KubeadmControlPlane) ValidateUpdate(_ context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { // add a * to indicate everything beneath is ok. // For example, {"spec", "*"} will allow any path under "spec" to change. 
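To make the allow-list mechanism concrete: the update validation marshals the old and new objects, builds a JSON merge patch between them, and flattens the patch into paths that must all appear in allowedPaths. For instance, a change limited to spec.replicas is accepted because (illustrative values):

	oldJSON := []byte(`{"spec":{"replicas":1,"version":"v1.27.0"}}`)
	newJSON := []byte(`{"spec":{"replicas":3,"version":"v1.27.0"}}`)
	patch, err := jsonpatch.CreateMergePatch(oldJSON, newJSON)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch)) // {"spec":{"replicas":3}}
	// paths() flattens the patch to [][]string{{"spec", "replicas"}},
	// which matches the allowed entry {spec, "replicas"}.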
allowedPaths := [][]string{ + // metadata {"metadata", "*"}, + // spec.kubeadmConfigSpec.clusterConfiguration {spec, kubeadmConfigSpec, clusterConfiguration, "etcd", "local", "imageRepository"}, {spec, kubeadmConfigSpec, clusterConfiguration, "etcd", "local", "imageTag"}, {spec, kubeadmConfigSpec, clusterConfiguration, "etcd", "local", "extraArgs"}, {spec, kubeadmConfigSpec, clusterConfiguration, "etcd", "local", "extraArgs", "*"}, + {spec, kubeadmConfigSpec, clusterConfiguration, "etcd", "local", "dataDir"}, + {spec, kubeadmConfigSpec, clusterConfiguration, "etcd", "local", "peerCertSANs"}, + {spec, kubeadmConfigSpec, clusterConfiguration, "etcd", "local", "serverCertSANs"}, + {spec, kubeadmConfigSpec, clusterConfiguration, "etcd", "external", "endpoints"}, + {spec, kubeadmConfigSpec, clusterConfiguration, "etcd", "external", "caFile"}, + {spec, kubeadmConfigSpec, clusterConfiguration, "etcd", "external", "certFile"}, + {spec, kubeadmConfigSpec, clusterConfiguration, "etcd", "external", "keyFile"}, {spec, kubeadmConfigSpec, clusterConfiguration, "dns", "imageRepository"}, {spec, kubeadmConfigSpec, clusterConfiguration, "dns", "imageTag"}, {spec, kubeadmConfigSpec, clusterConfiguration, "imageRepository"}, + {spec, kubeadmConfigSpec, clusterConfiguration, featureGates}, + {spec, kubeadmConfigSpec, clusterConfiguration, featureGates, "*"}, {spec, kubeadmConfigSpec, clusterConfiguration, apiServer}, {spec, kubeadmConfigSpec, clusterConfiguration, apiServer, "*"}, {spec, kubeadmConfigSpec, clusterConfiguration, controllerManager}, {spec, kubeadmConfigSpec, clusterConfiguration, controllerManager, "*"}, {spec, kubeadmConfigSpec, clusterConfiguration, scheduler}, {spec, kubeadmConfigSpec, clusterConfiguration, scheduler, "*"}, + // spec.kubeadmConfigSpec.initConfiguration {spec, kubeadmConfigSpec, initConfiguration, nodeRegistration}, {spec, kubeadmConfigSpec, initConfiguration, nodeRegistration, "*"}, {spec, kubeadmConfigSpec, initConfiguration, patches, directory}, + {spec, kubeadmConfigSpec, initConfiguration, patches}, {spec, kubeadmConfigSpec, initConfiguration, skipPhases}, + {spec, kubeadmConfigSpec, initConfiguration, "bootstrapTokens"}, + {spec, kubeadmConfigSpec, initConfiguration, "localAPIEndpoint"}, + {spec, kubeadmConfigSpec, initConfiguration, "localAPIEndpoint", "*"}, + // spec.kubeadmConfigSpec.joinConfiguration {spec, kubeadmConfigSpec, joinConfiguration, nodeRegistration}, {spec, kubeadmConfigSpec, joinConfiguration, nodeRegistration, "*"}, {spec, kubeadmConfigSpec, joinConfiguration, patches, directory}, + {spec, kubeadmConfigSpec, joinConfiguration, patches}, {spec, kubeadmConfigSpec, joinConfiguration, skipPhases}, + {spec, kubeadmConfigSpec, joinConfiguration, "caCertPath"}, + {spec, kubeadmConfigSpec, joinConfiguration, "controlPlane"}, + {spec, kubeadmConfigSpec, joinConfiguration, "controlPlane", "*"}, + {spec, kubeadmConfigSpec, joinConfiguration, "discovery"}, + {spec, kubeadmConfigSpec, joinConfiguration, "discovery", "*"}, + // spec.kubeadmConfigSpec {spec, kubeadmConfigSpec, preKubeadmCommands}, {spec, kubeadmConfigSpec, postKubeadmCommands}, {spec, kubeadmConfigSpec, files}, @@ -171,6 +217,10 @@ func (in *KubeadmControlPlane) ValidateUpdate(old runtime.Object) error { {spec, kubeadmConfigSpec, ignition, "*"}, {spec, kubeadmConfigSpec, diskSetup}, {spec, kubeadmConfigSpec, diskSetup, "*"}, + {spec, kubeadmConfigSpec, "format"}, + {spec, kubeadmConfigSpec, "mounts"}, + {spec, kubeadmConfigSpec, "useExperimentalRetryJoin"}, + // spec.machineTemplate {spec, 
"machineTemplate", "metadata"}, {spec, "machineTemplate", "metadata", "*"}, {spec, "machineTemplate", "infrastructureRef", "apiVersion"}, @@ -179,6 +229,7 @@ func (in *KubeadmControlPlane) ValidateUpdate(old runtime.Object) error { {spec, "machineTemplate", "nodeDrainTimeout"}, {spec, "machineTemplate", "nodeVolumeDetachTimeout"}, {spec, "machineTemplate", "nodeDeletionTimeout"}, + // spec {spec, "replicas"}, {spec, "version"}, {spec, "remediationStrategy"}, @@ -190,35 +241,34 @@ func (in *KubeadmControlPlane) ValidateUpdate(old runtime.Object) error { {spec, "rolloutStrategy", "*"}, } - allErrs := validateKubeadmControlPlaneSpec(in.Spec, in.Namespace, field.NewPath("spec")) - - prev, ok := old.(*KubeadmControlPlane) + oldK, ok := oldObj.(*controlplanev1.KubeadmControlPlane) if !ok { - return apierrors.NewBadRequest(fmt.Sprintf("expecting KubeadmControlPlane but got a %T", old)) + return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a KubeadmControlPlane but got a %T", oldObj)) } - // NOTE: Defaulting for the format field has been added in v1.1.0 after implementing ignition support. - // This allows existing KCP objects to pick up the new default. - if prev.Spec.KubeadmConfigSpec.Format == "" && in.Spec.KubeadmConfigSpec.Format == bootstrapv1.CloudConfig { - allowedPaths = append(allowedPaths, []string{spec, kubeadmConfigSpec, "format"}) + newK, ok := newObj.(*controlplanev1.KubeadmControlPlane) + if !ok { + return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a KubeadmControlPlane but got a %T", newObj)) } - originalJSON, err := json.Marshal(prev) + allErrs := validateKubeadmControlPlaneSpec(newK.Spec, newK.Namespace, field.NewPath("spec")) + + originalJSON, err := json.Marshal(oldK) if err != nil { - return apierrors.NewInternalError(err) + return nil, apierrors.NewInternalError(err) } - modifiedJSON, err := json.Marshal(in) + modifiedJSON, err := json.Marshal(newK) if err != nil { - return apierrors.NewInternalError(err) + return nil, apierrors.NewInternalError(err) } diff, err := jsonpatch.CreateMergePatch(originalJSON, modifiedJSON) if err != nil { - return apierrors.NewInternalError(err) + return nil, apierrors.NewInternalError(err) } jsonPatch := map[string]interface{}{} if err := json.Unmarshal(diff, &jsonPatch); err != nil { - return apierrors.NewInternalError(err) + return nil, apierrors.NewInternalError(err) } // Build a list of all paths that are trying to change diffpaths := paths([]string{}, jsonPatch) @@ -237,19 +287,19 @@ func (in *KubeadmControlPlane) ValidateUpdate(old runtime.Object) error { } } - allErrs = append(allErrs, in.validateVersion(prev.Spec.Version)...) - allErrs = append(allErrs, validateClusterConfiguration(in.Spec.KubeadmConfigSpec.ClusterConfiguration, prev.Spec.KubeadmConfigSpec.ClusterConfiguration, field.NewPath("spec", "kubeadmConfigSpec", "clusterConfiguration"))...) - allErrs = append(allErrs, in.validateCoreDNSVersion(prev)...) - allErrs = append(allErrs, in.Spec.KubeadmConfigSpec.Validate(field.NewPath("spec", "kubeadmConfigSpec"))...) + allErrs = append(allErrs, webhook.validateVersion(oldK, newK)...) + allErrs = append(allErrs, validateClusterConfiguration(oldK.Spec.KubeadmConfigSpec.ClusterConfiguration, newK.Spec.KubeadmConfigSpec.ClusterConfiguration, field.NewPath("spec", "kubeadmConfigSpec", "clusterConfiguration"))...) + allErrs = append(allErrs, webhook.validateCoreDNSVersion(oldK, newK)...) + allErrs = append(allErrs, newK.Spec.KubeadmConfigSpec.Validate(field.NewPath("spec", "kubeadmConfigSpec"))...) 
if len(allErrs) > 0 { - return apierrors.NewInvalid(GroupVersion.WithKind("KubeadmControlPlane").GroupKind(), in.Name, allErrs) + return nil, apierrors.NewInvalid(clusterv1.GroupVersion.WithKind("KubeadmControlPlane").GroupKind(), newK.Name, allErrs) } - return nil + return nil, nil } -func validateKubeadmControlPlaneSpec(s KubeadmControlPlaneSpec, namespace string, pathPrefix *field.Path) field.ErrorList { +func validateKubeadmControlPlaneSpec(s controlplanev1.KubeadmControlPlaneSpec, namespace string, pathPrefix *field.Path) field.ErrorList { allErrs := field.ErrorList{} if s.Replicas == nil { @@ -333,6 +383,9 @@ func validateKubeadmControlPlaneSpec(s KubeadmControlPlaneSpec, namespace string ) } + // Validate the metadata of the MachineTemplate + allErrs = append(allErrs, s.MachineTemplate.ObjectMeta.Validate(pathPrefix.Child("machineTemplate", "metadata"))...) + if !version.KubeSemver.MatchString(s.Version) { allErrs = append(allErrs, field.Invalid(pathPrefix.Child("version"), s.Version, "must be a valid semantic version")) } @@ -343,7 +396,7 @@ func validateKubeadmControlPlaneSpec(s KubeadmControlPlaneSpec, namespace string return allErrs } -func validateRolloutBefore(rolloutBefore *RolloutBefore, pathPrefix *field.Path) field.ErrorList { +func validateRolloutBefore(rolloutBefore *controlplanev1.RolloutBefore, pathPrefix *field.Path) field.ErrorList { allErrs := field.ErrorList{} if rolloutBefore == nil { @@ -359,14 +412,14 @@ func validateRolloutBefore(rolloutBefore *RolloutBefore, pathPrefix *field.Path) return allErrs } -func validateRolloutStrategy(rolloutStrategy *RolloutStrategy, replicas *int32, pathPrefix *field.Path) field.ErrorList { +func validateRolloutStrategy(rolloutStrategy *controlplanev1.RolloutStrategy, replicas *int32, pathPrefix *field.Path) field.ErrorList { allErrs := field.ErrorList{} if rolloutStrategy == nil { return allErrs } - if rolloutStrategy.Type != RollingUpdateStrategyType { + if rolloutStrategy.Type != controlplanev1.RollingUpdateStrategyType { allErrs = append( allErrs, field.Required( @@ -402,7 +455,7 @@ func validateRolloutStrategy(rolloutStrategy *RolloutStrategy, replicas *int32, return allErrs } -func validateClusterConfiguration(newClusterConfiguration, oldClusterConfiguration *bootstrapv1.ClusterConfiguration, pathPrefix *field.Path) field.ErrorList { +func validateClusterConfiguration(oldClusterConfiguration, newClusterConfiguration *bootstrapv1.ClusterConfiguration, pathPrefix *field.Path) field.ErrorList { allErrs := field.ErrorList{} if newClusterConfiguration == nil { @@ -529,22 +582,22 @@ func paths(path []string, diff map[string]interface{}) [][]string { return allPaths } -func (in *KubeadmControlPlane) validateCoreDNSVersion(prev *KubeadmControlPlane) (allErrs field.ErrorList) { - if in.Spec.KubeadmConfigSpec.ClusterConfiguration == nil || prev.Spec.KubeadmConfigSpec.ClusterConfiguration == nil { +func (webhook *KubeadmControlPlane) validateCoreDNSVersion(oldK, newK *controlplanev1.KubeadmControlPlane) (allErrs field.ErrorList) { + if newK.Spec.KubeadmConfigSpec.ClusterConfiguration == nil || oldK.Spec.KubeadmConfigSpec.ClusterConfiguration == nil { return allErrs } // return if either current or target versions is empty - if prev.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageTag == "" || in.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageTag == "" { + if newK.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageTag == "" || oldK.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageTag == "" { return allErrs } - targetDNS 
:= &in.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS + targetDNS := &newK.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS - fromVersion, err := version.ParseMajorMinorPatchTolerant(prev.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageTag) + fromVersion, err := version.ParseMajorMinorPatchTolerant(oldK.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageTag) if err != nil { allErrs = append(allErrs, field.Invalid( field.NewPath("spec", "kubeadmConfigSpec", "clusterConfiguration", "dns", "imageTag"), - prev.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageTag, + oldK.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageTag, fmt.Sprintf("failed to parse current CoreDNS version: %v", err), ), ) @@ -580,7 +633,8 @@ func (in *KubeadmControlPlane) validateCoreDNSVersion(prev *KubeadmControlPlane) return allErrs } -func (in *KubeadmControlPlane) validateVersion(previousVersion string) (allErrs field.ErrorList) { +func (webhook *KubeadmControlPlane) validateVersion(oldK, newK *controlplanev1.KubeadmControlPlane) (allErrs field.ErrorList) { + previousVersion := oldK.Spec.Version fromVersion, err := version.ParseMajorMinorPatch(previousVersion) if err != nil { allErrs = append(allErrs, @@ -592,12 +646,12 @@ func (in *KubeadmControlPlane) validateVersion(previousVersion string) (allErrs return allErrs } - toVersion, err := version.ParseMajorMinorPatch(in.Spec.Version) + toVersion, err := version.ParseMajorMinorPatch(newK.Spec.Version) if err != nil { allErrs = append(allErrs, field.InternalError( field.NewPath("spec", "version"), - errors.Wrapf(err, "failed to parse updated kubeadmcontrolplane version: %s", in.Spec.Version), + errors.Wrapf(err, "failed to parse updated kubeadmcontrolplane version: %s", newK.Spec.Version), ), ) return allErrs @@ -629,7 +683,7 @@ func (in *KubeadmControlPlane) validateVersion(previousVersion string) (allErrs allErrs = append(allErrs, field.Forbidden( field.NewPath("spec", "version"), - fmt.Sprintf("cannot update Kubernetes version from %s to %s", previousVersion, in.Spec.Version), + fmt.Sprintf("cannot update Kubernetes version from %s to %s", previousVersion, newK.Spec.Version), ), ) } @@ -642,8 +696,8 @@ func (in *KubeadmControlPlane) validateVersion(previousVersion string) (allErrs // given how the migration has been implemented in kubeadm. // // Block if imageRepository is not set (i.e. the default registry should be used), - if (in.Spec.KubeadmConfigSpec.ClusterConfiguration == nil || - in.Spec.KubeadmConfigSpec.ClusterConfiguration.ImageRepository == "") && + if (newK.Spec.KubeadmConfigSpec.ClusterConfiguration == nil || + newK.Spec.KubeadmConfigSpec.ClusterConfiguration.ImageRepository == "") && // the version changed (i.e. we have an upgrade), toVersion.NE(fromVersion) && // the version is >= v1.22.0 and < v1.26.0 @@ -663,6 +717,6 @@ func (in *KubeadmControlPlane) validateVersion(previousVersion string) (allErrs } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. 
-func (in *KubeadmControlPlane) ValidateDelete() error { - return nil +func (webhook *KubeadmControlPlane) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) { + return nil, nil } diff --git a/controlplane/kubeadm/api/v1beta1/kubeadm_control_plane_webhook_test.go b/controlplane/kubeadm/internal/webhooks/kubeadm_control_plane_test.go similarity index 84% rename from controlplane/kubeadm/api/v1beta1/kubeadm_control_plane_webhook_test.go rename to controlplane/kubeadm/internal/webhooks/kubeadm_control_plane_test.go index 6fdd69d706d9..9c62b22ad4e6 100644 --- a/controlplane/kubeadm/api/v1beta1/kubeadm_control_plane_webhook_test.go +++ b/controlplane/kubeadm/internal/webhooks/kubeadm_control_plane_test.go @@ -14,9 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +package webhooks import ( + "strings" "testing" "time" @@ -25,30 +26,37 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" utilfeature "k8s.io/component-base/featuregate/testing" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/feature" - utildefaulting "sigs.k8s.io/cluster-api/util/defaulting" + "sigs.k8s.io/cluster-api/internal/webhooks/util" +) + +var ( + invalidNamespaceName = "bar" + ctx = ctrl.SetupSignalHandler() ) func TestKubeadmControlPlaneDefault(t *testing.T) { g := NewWithT(t) - kcp := &KubeadmControlPlane{ + kcp := &controlplanev1.KubeadmControlPlane{ ObjectMeta: metav1.ObjectMeta{ Namespace: "foo", }, - Spec: KubeadmControlPlaneSpec{ + Spec: controlplanev1.KubeadmControlPlaneSpec{ Version: "v1.18.3", - MachineTemplate: KubeadmControlPlaneMachineTemplate{ + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ InfrastructureRef: corev1.ObjectReference{ APIVersion: "test/v1alpha1", Kind: "UnknownInfraMachine", Name: "foo", }, }, - RolloutStrategy: &RolloutStrategy{}, + RolloutStrategy: &controlplanev1.RolloutStrategy{}, }, } updateDefaultingValidationKCP := kcp.DeepCopy() @@ -59,24 +67,25 @@ func TestKubeadmControlPlaneDefault(t *testing.T) { Name: "foo", Namespace: "foo", } - t.Run("for KubeadmControlPlane", utildefaulting.DefaultValidateTest(updateDefaultingValidationKCP)) - kcp.Default() + webhook := &KubeadmControlPlane{} + t.Run("for KubeadmControlPlane", util.CustomDefaultValidateTest(ctx, updateDefaultingValidationKCP, webhook)) + g.Expect(webhook.Default(ctx, kcp)).To(Succeed()) g.Expect(kcp.Spec.KubeadmConfigSpec.Format).To(Equal(bootstrapv1.CloudConfig)) g.Expect(kcp.Spec.MachineTemplate.InfrastructureRef.Namespace).To(Equal(kcp.Namespace)) g.Expect(kcp.Spec.Version).To(Equal("v1.18.3")) - g.Expect(kcp.Spec.RolloutStrategy.Type).To(Equal(RollingUpdateStrategyType)) + g.Expect(kcp.Spec.RolloutStrategy.Type).To(Equal(controlplanev1.RollingUpdateStrategyType)) g.Expect(kcp.Spec.RolloutStrategy.RollingUpdate.MaxSurge.IntVal).To(Equal(int32(1))) } func TestKubeadmControlPlaneValidateCreate(t *testing.T) { - valid := &KubeadmControlPlane{ + valid := &controlplanev1.KubeadmControlPlane{ ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "foo", }, - Spec: KubeadmControlPlaneSpec{ - MachineTemplate: KubeadmControlPlaneMachineTemplate{ + Spec: controlplanev1.KubeadmControlPlaneSpec{ + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ 
InfrastructureRef: corev1.ObjectReference{ APIVersion: "test/v1alpha1", Kind: "UnknownInfraMachine", @@ -87,11 +96,11 @@ func TestKubeadmControlPlaneValidateCreate(t *testing.T) { KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{}, }, - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), Version: "v1.19.0", - RolloutStrategy: &RolloutStrategy{ - Type: RollingUpdateStrategyType, - RollingUpdate: &RollingUpdate{ + RolloutStrategy: &controlplanev1.RolloutStrategy{ + Type: controlplanev1.RollingUpdateStrategyType, + RollingUpdate: &controlplanev1.RollingUpdate{ MaxSurge: &intstr.IntOrString{ IntVal: 1, }, @@ -108,16 +117,16 @@ func TestKubeadmControlPlaneValidateCreate(t *testing.T) { stringMaxSurge.Spec.RolloutStrategy.RollingUpdate.MaxSurge = &val invalidNamespace := valid.DeepCopy() - invalidNamespace.Spec.MachineTemplate.InfrastructureRef.Namespace = "bar" + invalidNamespace.Spec.MachineTemplate.InfrastructureRef.Namespace = invalidNamespaceName missingReplicas := valid.DeepCopy() missingReplicas.Spec.Replicas = nil zeroReplicas := valid.DeepCopy() - zeroReplicas.Spec.Replicas = pointer.Int32(0) + zeroReplicas.Spec.Replicas = ptr.To[int32](0) evenReplicas := valid.DeepCopy() - evenReplicas.Spec.Replicas = pointer.Int32(2) + evenReplicas.Spec.Replicas = ptr.To[int32](2) evenReplicasExternalEtcd := evenReplicas.DeepCopy() evenReplicasExternalEtcd.Spec.KubeadmConfigSpec = bootstrapv1.KubeadmConfigSpec{ @@ -141,8 +150,8 @@ func TestKubeadmControlPlaneValidateCreate(t *testing.T) { invalidCoreDNSVersion.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageTag = "v1.7" // not a valid semantic version invalidRolloutBeforeCertificateExpiryDays := valid.DeepCopy() - invalidRolloutBeforeCertificateExpiryDays.Spec.RolloutBefore = &RolloutBefore{ - CertificatesExpiryDays: pointer.Int32(5), // less than minimum + invalidRolloutBeforeCertificateExpiryDays.Spec.RolloutBefore = &controlplanev1.RolloutBefore{ + CertificatesExpiryDays: ptr.To[int32](5), // less than minimum } invalidIgnitionConfiguration := valid.DeepCopy() @@ -152,11 +161,21 @@ func TestKubeadmControlPlaneValidateCreate(t *testing.T) { validIgnitionConfiguration.Spec.KubeadmConfigSpec.Format = bootstrapv1.Ignition validIgnitionConfiguration.Spec.KubeadmConfigSpec.Ignition = &bootstrapv1.IgnitionSpec{} + invalidMetadata := valid.DeepCopy() + invalidMetadata.Spec.MachineTemplate.ObjectMeta.Labels = map[string]string{ + "foo": "$invalid-key", + "bar": strings.Repeat("a", 64) + "too-long-value", + "/invalid-key": "foo", + } + invalidMetadata.Spec.MachineTemplate.ObjectMeta.Annotations = map[string]string{ + "/invalid-key": "foo", + } + tests := []struct { name string enableIgnitionFeature bool expectErr bool - kcp *KubeadmControlPlane + kcp *controlplanev1.KubeadmControlPlane }{ { name: "should succeed when given a valid config", @@ -236,6 +255,12 @@ func TestKubeadmControlPlaneValidateCreate(t *testing.T) { expectErr: false, kcp: validIgnitionConfiguration, }, + { + name: "should return error for invalid metadata", + enableIgnitionFeature: true, + expectErr: true, + kcp: invalidMetadata, + }, } for _, tt := range tests { @@ -248,23 +273,27 @@ func TestKubeadmControlPlaneValidateCreate(t *testing.T) { g := NewWithT(t) + webhook := &KubeadmControlPlane{} + + warnings, err := webhook.ValidateCreate(ctx, tt.kcp) if tt.expectErr { - g.Expect(tt.kcp.ValidateCreate()).NotTo(Succeed()) + g.Expect(err).To(HaveOccurred()) } else { - g.Expect(tt.kcp.ValidateCreate()).To(Succeed()) + 
g.Expect(err).ToNot(HaveOccurred()) } + g.Expect(warnings).To(BeEmpty()) }) } } func TestKubeadmControlPlaneValidateUpdate(t *testing.T) { - before := &KubeadmControlPlane{ + before := &controlplanev1.KubeadmControlPlane{ ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "foo", }, - Spec: KubeadmControlPlaneSpec{ - MachineTemplate: KubeadmControlPlaneMachineTemplate{ + Spec: controlplanev1.KubeadmControlPlaneSpec{ + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ InfrastructureRef: corev1.ObjectReference{ APIVersion: "test/v1alpha1", Kind: "UnknownInfraMachine", @@ -275,10 +304,10 @@ func TestKubeadmControlPlaneValidateUpdate(t *testing.T) { NodeVolumeDetachTimeout: &metav1.Duration{Duration: time.Second}, NodeDeletionTimeout: &metav1.Duration{Duration: time.Second}, }, - Replicas: pointer.Int32(1), - RolloutStrategy: &RolloutStrategy{ - Type: RollingUpdateStrategyType, - RollingUpdate: &RollingUpdate{ + Replicas: ptr.To[int32](1), + RolloutStrategy: &controlplanev1.RolloutStrategy{ + Type: controlplanev1.RollingUpdateStrategyType, + RollingUpdate: &controlplanev1.RollingUpdate{ MaxSurge: &intstr.IntOrString{ IntVal: 1, }, @@ -334,35 +363,29 @@ func TestKubeadmControlPlaneValidateUpdate(t *testing.T) { }, NTP: &bootstrapv1.NTP{ Servers: []string{"test-server-1", "test-server-2"}, - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), }, }, Version: "v1.16.6", - RolloutBefore: &RolloutBefore{ - CertificatesExpiryDays: pointer.Int32(7), + RolloutBefore: &controlplanev1.RolloutBefore{ + CertificatesExpiryDays: ptr.To[int32](7), }, }, } updateMaxSurgeVal := before.DeepCopy() updateMaxSurgeVal.Spec.RolloutStrategy.RollingUpdate.MaxSurge.IntVal = int32(0) - updateMaxSurgeVal.Spec.Replicas = pointer.Int32(3) + updateMaxSurgeVal.Spec.Replicas = ptr.To[int32](3) wrongReplicaCountForScaleIn := before.DeepCopy() wrongReplicaCountForScaleIn.Spec.RolloutStrategy.RollingUpdate.MaxSurge.IntVal = int32(0) - invalidUpdateKubeadmConfigInit := before.DeepCopy() - invalidUpdateKubeadmConfigInit.Spec.KubeadmConfigSpec.InitConfiguration = &bootstrapv1.InitConfiguration{} - validUpdateKubeadmConfigInit := before.DeepCopy() validUpdateKubeadmConfigInit.Spec.KubeadmConfigSpec.InitConfiguration.NodeRegistration = bootstrapv1.NodeRegistrationOptions{} invalidUpdateKubeadmConfigCluster := before.DeepCopy() invalidUpdateKubeadmConfigCluster.Spec.KubeadmConfigSpec.ClusterConfiguration = &bootstrapv1.ClusterConfiguration{} - invalidUpdateKubeadmConfigJoin := before.DeepCopy() - invalidUpdateKubeadmConfigJoin.Spec.KubeadmConfigSpec.JoinConfiguration = &bootstrapv1.JoinConfiguration{} - validUpdateKubeadmConfigJoin := before.DeepCopy() validUpdateKubeadmConfigJoin.Spec.KubeadmConfigSpec.JoinConfiguration.NodeRegistration = bootstrapv1.NodeRegistrationOptions{} @@ -404,27 +427,27 @@ func TestKubeadmControlPlaneValidateUpdate(t *testing.T) { validUpdate.Spec.MachineTemplate.NodeDrainTimeout = &metav1.Duration{Duration: 10 * time.Second} validUpdate.Spec.MachineTemplate.NodeVolumeDetachTimeout = &metav1.Duration{Duration: 10 * time.Second} validUpdate.Spec.MachineTemplate.NodeDeletionTimeout = &metav1.Duration{Duration: 10 * time.Second} - validUpdate.Spec.Replicas = pointer.Int32(5) + validUpdate.Spec.Replicas = ptr.To[int32](5) now := metav1.NewTime(time.Now()) validUpdate.Spec.RolloutAfter = &now - validUpdate.Spec.RolloutBefore = &RolloutBefore{ - CertificatesExpiryDays: pointer.Int32(14), + validUpdate.Spec.RolloutBefore = &controlplanev1.RolloutBefore{ + CertificatesExpiryDays: 
ptr.To[int32](14), } - validUpdate.Spec.RemediationStrategy = &RemediationStrategy{ - MaxRetry: pointer.Int32(50), + validUpdate.Spec.RemediationStrategy = &controlplanev1.RemediationStrategy{ + MaxRetry: ptr.To[int32](50), MinHealthyPeriod: &metav1.Duration{Duration: 10 * time.Hour}, RetryPeriod: metav1.Duration{Duration: 10 * time.Minute}, } validUpdate.Spec.KubeadmConfigSpec.Format = bootstrapv1.CloudConfig scaleToZero := before.DeepCopy() - scaleToZero.Spec.Replicas = pointer.Int32(0) + scaleToZero.Spec.Replicas = ptr.To[int32](0) scaleToEven := before.DeepCopy() - scaleToEven.Spec.Replicas = pointer.Int32(2) + scaleToEven.Spec.Replicas = ptr.To[int32](2) invalidNamespace := before.DeepCopy() - invalidNamespace.Spec.MachineTemplate.InfrastructureRef.Namespace = "bar" + invalidNamespace.Spec.MachineTemplate.InfrastructureRef.Namespace = invalidNamespaceName missingReplicas := before.DeepCopy() missingReplicas.Spec.Replicas = nil @@ -557,8 +580,6 @@ func TestKubeadmControlPlaneValidateUpdate(t *testing.T) { localDataDir.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local = &bootstrapv1.LocalEtcd{ DataDir: "some local data dir", } - modifyLocalDataDir := localDataDir.DeepCopy() - modifyLocalDataDir.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local.DataDir = "a different local data dir" localPeerCertSANs := before.DeepCopy() localPeerCertSANs.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local = &bootstrapv1.LocalEtcd{ @@ -584,7 +605,7 @@ func TestKubeadmControlPlaneValidateUpdate(t *testing.T) { }, } scaleToEvenExternalEtcdCluster := beforeExternalEtcdCluster.DeepCopy() - scaleToEvenExternalEtcdCluster.Spec.Replicas = pointer.Int32(2) + scaleToEvenExternalEtcdCluster.Spec.Replicas = ptr.To[int32](2) beforeInvalidEtcdCluster := before.DeepCopy() beforeInvalidEtcdCluster.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd = bootstrapv1.Etcd{ @@ -615,11 +636,11 @@ func TestKubeadmControlPlaneValidateUpdate(t *testing.T) { updateNTPServers.Spec.KubeadmConfigSpec.NTP.Servers = []string{"new-server"} disableNTPServers := before.DeepCopy() - disableNTPServers.Spec.KubeadmConfigSpec.NTP.Enabled = pointer.Bool(false) + disableNTPServers.Spec.KubeadmConfigSpec.NTP.Enabled = ptr.To(false) invalidRolloutBeforeCertificateExpiryDays := before.DeepCopy() - invalidRolloutBeforeCertificateExpiryDays.Spec.RolloutBefore = &RolloutBefore{ - CertificatesExpiryDays: pointer.Int32(5), // less than minimum + invalidRolloutBeforeCertificateExpiryDays.Spec.RolloutBefore = &controlplanev1.RolloutBefore{ + CertificatesExpiryDays: ptr.To[int32](5), // less than minimum } unsetRolloutBefore := before.DeepCopy() @@ -663,12 +684,33 @@ func TestKubeadmControlPlaneValidateUpdate(t *testing.T) { }, } + switchFromCloudInitToIgnition := before.DeepCopy() + switchFromCloudInitToIgnition.Spec.KubeadmConfigSpec.Format = bootstrapv1.Ignition + switchFromCloudInitToIgnition.Spec.KubeadmConfigSpec.Mounts = []bootstrapv1.MountPoints{ + {"/var/lib/testdir", "/var/lib/etcd/data"}, + } + + invalidMetadata := before.DeepCopy() + invalidMetadata.Spec.MachineTemplate.ObjectMeta.Labels = map[string]string{ + "foo": "$invalid-key", + "bar": strings.Repeat("a", 64) + "too-long-value", + "/invalid-key": "foo", + } + invalidMetadata.Spec.MachineTemplate.ObjectMeta.Annotations = map[string]string{ + "/invalid-key": "foo", + } + + beforeUseExperimentalRetryJoin := before.DeepCopy() + beforeUseExperimentalRetryJoin.Spec.KubeadmConfigSpec.UseExperimentalRetryJoin = true //nolint:staticcheck + updateUseExperimentalRetryJoin := 
before.DeepCopy() + updateUseExperimentalRetryJoin.Spec.KubeadmConfigSpec.UseExperimentalRetryJoin = false //nolint:staticcheck + tests := []struct { name string enableIgnitionFeature bool expectErr bool - before *KubeadmControlPlane - kcp *KubeadmControlPlane + before *controlplanev1.KubeadmControlPlane + kcp *controlplanev1.KubeadmControlPlane }{ { name: "should succeed when given a valid config", @@ -676,12 +718,6 @@ func TestKubeadmControlPlaneValidateUpdate(t *testing.T) { before: before, kcp: validUpdate, }, - { - name: "should return error when trying to mutate the kubeadmconfigspec initconfiguration", - expectErr: true, - before: before, - kcp: invalidUpdateKubeadmConfigInit, - }, { name: "should not return an error when trying to mutate the kubeadmconfigspec initconfiguration noderegistration", expectErr: false, @@ -694,12 +730,6 @@ func TestKubeadmControlPlaneValidateUpdate(t *testing.T) { before: before, kcp: invalidUpdateKubeadmConfigCluster, }, - { - name: "should return error when trying to mutate the kubeadmconfigspec joinconfiguration", - expectErr: true, - before: before, - kcp: invalidUpdateKubeadmConfigJoin, - }, { name: "should not return an error when trying to mutate the kubeadmconfigspec joinconfiguration noderegistration", expectErr: false, @@ -856,26 +886,26 @@ func TestKubeadmControlPlaneValidateUpdate(t *testing.T) { kcp: imageRepository, }, { - name: "should fail when making a change to the cluster config's featureGates", - expectErr: true, + name: "should succeed when making a change to the cluster config's featureGates", + expectErr: false, before: before, kcp: featureGates, }, { - name: "should fail when making a change to the cluster config's local etcd's configuration localDataDir field", - expectErr: true, + name: "should succeed when making a change to the cluster config's local etcd's configuration localDataDir field", + expectErr: false, before: before, kcp: localDataDir, }, { - name: "should fail when making a change to the cluster config's local etcd's configuration localPeerCertSANs field", - expectErr: true, + name: "should succeed when making a change to the cluster config's local etcd's configuration localPeerCertSANs field", + expectErr: false, before: before, kcp: localPeerCertSANs, }, { - name: "should fail when making a change to the cluster config's local etcd's configuration localServerCertSANs field", - expectErr: true, + name: "should succeed when making a change to the cluster config's local etcd's configuration localServerCertSANs field", + expectErr: false, before: before, kcp: localServerCertSANs, }, @@ -886,8 +916,8 @@ func TestKubeadmControlPlaneValidateUpdate(t *testing.T) { kcp: localExtraArgs, }, { - name: "should fail when making a change to the cluster config's external etcd's configuration", - expectErr: true, + name: "should succeed when making a change to the cluster config's external etcd's configuration", + expectErr: false, before: before, kcp: externalEtcd, }, @@ -897,12 +927,6 @@ func TestKubeadmControlPlaneValidateUpdate(t *testing.T) { before: etcdLocalImageTag, kcp: unsetEtcd, }, - { - name: "should fail when modifying a field that is not the local etcd image metadata", - expectErr: true, - before: localDataDir, - kcp: modifyLocalDataDir, - }, { name: "should fail if both local and external etcd are set", expectErr: true, @@ -1001,6 +1025,26 @@ func TestKubeadmControlPlaneValidateUpdate(t *testing.T) { before: validIgnitionConfigurationBefore, kcp: validIgnitionConfigurationAfter, }, + { + name: "should succeed when 
CloudInit was used before", + enableIgnitionFeature: true, + expectErr: false, + before: before, + kcp: switchFromCloudInitToIgnition, + }, + { + name: "should return error for invalid metadata", + enableIgnitionFeature: true, + expectErr: true, + before: before, + kcp: invalidMetadata, + }, + { + name: "should allow changes to useExperimentalRetryJoin", + expectErr: false, + before: beforeUseExperimentalRetryJoin, + kcp: updateUseExperimentalRetryJoin, + }, } for _, tt := range tests { @@ -1013,12 +1057,15 @@ func TestKubeadmControlPlaneValidateUpdate(t *testing.T) { g := NewWithT(t) - err := tt.kcp.ValidateUpdate(tt.before.DeepCopy()) + webhook := &KubeadmControlPlane{} + + warnings, err := webhook.ValidateUpdate(ctx, tt.before.DeepCopy(), tt.kcp) if tt.expectErr { g.Expect(err).To(HaveOccurred()) } else { g.Expect(err).To(Succeed()) } + g.Expect(warnings).To(BeEmpty()) }) } } @@ -1161,8 +1208,8 @@ func TestValidateVersion(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - kcp := KubeadmControlPlane{ - Spec: KubeadmControlPlaneSpec{ + kcpNew := controlplanev1.KubeadmControlPlane{ + Spec: controlplanev1.KubeadmControlPlaneSpec{ KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: tt.clusterConfiguration, }, @@ -1170,7 +1217,18 @@ func TestValidateVersion(t *testing.T) { }, } - allErrs := kcp.validateVersion(tt.oldVersion) + kcpOld := controlplanev1.KubeadmControlPlane{ + Spec: controlplanev1.KubeadmControlPlaneSpec{ + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: tt.clusterConfiguration, + }, + Version: tt.oldVersion, + }, + } + + webhook := &KubeadmControlPlane{} + + allErrs := webhook.validateVersion(&kcpOld, &kcpNew) if tt.expectErr { g.Expect(allErrs).ToNot(BeEmpty()) } else { @@ -1180,14 +1238,16 @@ func TestValidateVersion(t *testing.T) { } } func TestKubeadmControlPlaneValidateUpdateAfterDefaulting(t *testing.T) { - before := &KubeadmControlPlane{ + g := NewWithT(t) + + before := &controlplanev1.KubeadmControlPlane{ ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "foo", }, - Spec: KubeadmControlPlaneSpec{ + Spec: controlplanev1.KubeadmControlPlaneSpec{ Version: "v1.19.0", - MachineTemplate: KubeadmControlPlaneMachineTemplate{ + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ InfrastructureRef: corev1.ObjectReference{ APIVersion: "test/v1alpha1", Kind: "UnknownInfraMachine", @@ -1199,13 +1259,14 @@ func TestKubeadmControlPlaneValidateUpdateAfterDefaulting(t *testing.T) { } afterDefault := before.DeepCopy() - afterDefault.Default() + webhook := &KubeadmControlPlane{} + g.Expect(webhook.Default(ctx, afterDefault)).To(Succeed()) tests := []struct { name string expectErr bool - before *KubeadmControlPlane - kcp *KubeadmControlPlane + before *controlplanev1.KubeadmControlPlane + kcp *controlplanev1.KubeadmControlPlane }{ { name: "update should succeed after defaulting", @@ -1218,17 +1279,21 @@ func TestKubeadmControlPlaneValidateUpdateAfterDefaulting(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - err := tt.kcp.ValidateUpdate(tt.before.DeepCopy()) + + webhook := &KubeadmControlPlane{} + + warnings, err := webhook.ValidateUpdate(ctx, tt.before.DeepCopy(), tt.kcp) if tt.expectErr { g.Expect(err).To(HaveOccurred()) } else { g.Expect(err).To(Succeed()) g.Expect(tt.kcp.Spec.MachineTemplate.InfrastructureRef.Namespace).To(Equal(tt.before.Namespace)) g.Expect(tt.kcp.Spec.Version).To(Equal("v1.19.0")) - 
g.Expect(tt.kcp.Spec.RolloutStrategy.Type).To(Equal(RollingUpdateStrategyType)) + g.Expect(tt.kcp.Spec.RolloutStrategy.Type).To(Equal(controlplanev1.RollingUpdateStrategyType)) g.Expect(tt.kcp.Spec.RolloutStrategy.RollingUpdate.MaxSurge.IntVal).To(Equal(int32(1))) - g.Expect(tt.kcp.Spec.Replicas).To(Equal(pointer.Int32(1))) + g.Expect(tt.kcp.Spec.Replicas).To(Equal(ptr.To[int32](1))) } + g.Expect(warnings).To(BeEmpty()) }) } } diff --git a/controlplane/kubeadm/internal/webhooks/kubeadmcontrolplanetemplate.go b/controlplane/kubeadm/internal/webhooks/kubeadmcontrolplanetemplate.go new file mode 100644 index 000000000000..ad58d472be2a --- /dev/null +++ b/controlplane/kubeadm/internal/webhooks/kubeadmcontrolplanetemplate.go @@ -0,0 +1,151 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhooks + +import ( + "context" + "fmt" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + "sigs.k8s.io/cluster-api/feature" + "sigs.k8s.io/cluster-api/internal/util/compare" +) + +func (webhook *KubeadmControlPlaneTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(&controlplanev1.KubeadmControlPlaneTemplate{}). + WithDefaulter(webhook). + WithValidator(webhook). + Complete() +} + +// +kubebuilder:webhook:verbs=create;update,path=/validate-controlplane-cluster-x-k8s-io-v1beta1-kubeadmcontrolplanetemplate,mutating=false,failurePolicy=fail,groups=controlplane.cluster.x-k8s.io,resources=kubeadmcontrolplanetemplates,versions=v1beta1,name=validation.kubeadmcontrolplanetemplate.controlplane.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 +// +kubebuilder:webhook:verbs=create;update,path=/mutate-controlplane-cluster-x-k8s-io-v1beta1-kubeadmcontrolplanetemplate,mutating=true,failurePolicy=fail,groups=controlplane.cluster.x-k8s.io,resources=kubeadmcontrolplanetemplates,versions=v1beta1,name=default.kubeadmcontrolplanetemplate.controlplane.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 + +// KubeadmControlPlaneTemplate implements a validation and defaulting webhook for KubeadmControlPlaneTemplate. +type KubeadmControlPlaneTemplate struct{} + +var _ webhook.CustomValidator = &KubeadmControlPlaneTemplate{} +var _ webhook.CustomDefaulter = &KubeadmControlPlaneTemplate{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type. 
+func (webhook *KubeadmControlPlaneTemplate) Default(_ context.Context, obj runtime.Object) error { + k, ok := obj.(*controlplanev1.KubeadmControlPlaneTemplate) + if !ok { + return apierrors.NewBadRequest(fmt.Sprintf("expected a KubeadmControlPlaneTemplate but got a %T", obj)) + } + + k.Spec.Template.Spec.KubeadmConfigSpec.Default() + + k.Spec.Template.Spec.RolloutStrategy = defaultRolloutStrategy(k.Spec.Template.Spec.RolloutStrategy) + + return nil +} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type. +func (webhook *KubeadmControlPlaneTemplate) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) { + k, ok := obj.(*controlplanev1.KubeadmControlPlaneTemplate) + if !ok { + return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a KubeadmControlPlaneTemplate but got a %T", obj)) + } + + // NOTE: KubeadmControlPlaneTemplate is behind the ClusterTopology feature gate; the webhook + // must prevent creating new objects in case the feature flag is disabled. + if !feature.Gates.Enabled(feature.ClusterTopology) { + return nil, field.Forbidden( + field.NewPath("spec"), + "can be set only if the ClusterTopology feature flag is enabled", + ) + } + + spec := k.Spec.Template.Spec + allErrs := validateKubeadmControlPlaneTemplateResourceSpec(spec, field.NewPath("spec", "template", "spec")) + allErrs = append(allErrs, validateClusterConfiguration(nil, spec.KubeadmConfigSpec.ClusterConfiguration, field.NewPath("spec", "template", "spec", "kubeadmConfigSpec", "clusterConfiguration"))...) + allErrs = append(allErrs, spec.KubeadmConfigSpec.Validate(field.NewPath("spec", "template", "spec", "kubeadmConfigSpec"))...) + // Validate the metadata of the KubeadmControlPlaneTemplateResource + allErrs = append(allErrs, k.Spec.Template.ObjectMeta.Validate(field.NewPath("spec", "template", "metadata"))...) + if len(allErrs) > 0 { + return nil, apierrors.NewInvalid(clusterv1.GroupVersion.WithKind("KubeadmControlPlaneTemplate").GroupKind(), k.Name, allErrs) + } + return nil, nil +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
+func (webhook *KubeadmControlPlaneTemplate) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { + var allErrs field.ErrorList + + oldK, ok := oldObj.(*controlplanev1.KubeadmControlPlaneTemplate) + if !ok { + return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a KubeadmControlPlaneTemplate but got a %T", oldObj)) + } + + newK, ok := newObj.(*controlplanev1.KubeadmControlPlaneTemplate) + if !ok { + return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a KubeadmControlPlaneTemplate but got a %T", newObj)) + } + + if err := webhook.Default(ctx, oldK); err != nil { + return nil, apierrors.NewBadRequest(fmt.Sprintf("failed to compare old and new KubeadmControlPlaneTemplate: failed to default old object: %v", err)) + } + if err := webhook.Default(ctx, newK); err != nil { + return nil, apierrors.NewBadRequest(fmt.Sprintf("failed to compare old and new KubeadmControlPlaneTemplate: failed to default new object: %v", err)) + } + + equal, diff, err := compare.Diff(oldK.Spec.Template.Spec, newK.Spec.Template.Spec) + if err != nil { + return nil, apierrors.NewBadRequest(fmt.Sprintf("failed to compare old and new KubeadmControlPlaneTemplate: %v", err)) + } + if !equal { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec", "template", "spec"), newK, fmt.Sprintf("KubeadmControlPlaneTemplate spec.template.spec field is immutable. Please create new resource instead. Diff: %s", diff)), + ) + } + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid(clusterv1.GroupVersion.WithKind("KubeadmControlPlaneTemplate").GroupKind(), newK.Name, allErrs) +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type. +func (webhook *KubeadmControlPlaneTemplate) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) { + return nil, nil +} + +// validateKubeadmControlPlaneTemplateResourceSpec is a copy of validateKubeadmControlPlaneSpec which +// only validates the fields in KubeadmControlPlaneTemplateResourceSpec we care about. +func validateKubeadmControlPlaneTemplateResourceSpec(s controlplanev1.KubeadmControlPlaneTemplateResourceSpec, pathPrefix *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + allErrs = append(allErrs, validateRolloutBefore(s.RolloutBefore, pathPrefix.Child("rolloutBefore"))...) + allErrs = append(allErrs, validateRolloutStrategy(s.RolloutStrategy, nil, pathPrefix.Child("rolloutStrategy"))...) + + if s.MachineTemplate != nil { + // Validate the metadata of the MachineTemplate + allErrs = append(allErrs, s.MachineTemplate.ObjectMeta.Validate(pathPrefix.Child("machineTemplate", "metadata"))...) + } + + return allErrs +} diff --git a/controlplane/kubeadm/internal/webhooks/kubeadmcontrolplanetemplate_test.go b/controlplane/kubeadm/internal/webhooks/kubeadmcontrolplanetemplate_test.go new file mode 100644 index 000000000000..7da19acdbb4a --- /dev/null +++ b/controlplane/kubeadm/internal/webhooks/kubeadmcontrolplanetemplate_test.go @@ -0,0 +1,232 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhooks + +import ( + "strings" + "testing" + "time" + + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilfeature "k8s.io/component-base/featuregate/testing" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + "sigs.k8s.io/cluster-api/feature" + "sigs.k8s.io/cluster-api/internal/webhooks/util" +) + +func TestKubeadmControlPlaneTemplateDefault(t *testing.T) { + defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)() + + g := NewWithT(t) + + kcpTemplate := &controlplanev1.KubeadmControlPlaneTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + }, + Spec: controlplanev1.KubeadmControlPlaneTemplateSpec{ + Template: controlplanev1.KubeadmControlPlaneTemplateResource{ + Spec: controlplanev1.KubeadmControlPlaneTemplateResourceSpec{ + MachineTemplate: &controlplanev1.KubeadmControlPlaneTemplateMachineTemplate{ + NodeDrainTimeout: &metav1.Duration{Duration: 10 * time.Second}, + }, + }, + }, + }, + } + updateDefaultingValidationKCPTemplate := kcpTemplate.DeepCopy() + updateDefaultingValidationKCPTemplate.Spec.Template.Spec.MachineTemplate.NodeDrainTimeout = &metav1.Duration{Duration: 20 * time.Second} + webhook := &KubeadmControlPlaneTemplate{} + t.Run("for KubeadmControlPlaneTemplate", util.CustomDefaultValidateTest(ctx, updateDefaultingValidationKCPTemplate, webhook)) + g.Expect(webhook.Default(ctx, kcpTemplate)).To(Succeed()) + + g.Expect(kcpTemplate.Spec.Template.Spec.KubeadmConfigSpec.Format).To(Equal(bootstrapv1.CloudConfig)) + g.Expect(kcpTemplate.Spec.Template.Spec.RolloutStrategy.Type).To(Equal(controlplanev1.RollingUpdateStrategyType)) + g.Expect(kcpTemplate.Spec.Template.Spec.RolloutStrategy.RollingUpdate.MaxSurge.IntVal).To(Equal(int32(1))) +} + +func TestKubeadmControlPlaneTemplateValidationFeatureGateEnabled(t *testing.T) { + defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)() + + t.Run("create kubeadmcontrolplanetemplate should pass if gate enabled and valid kubeadmcontrolplanetemplate", func(t *testing.T) { + testnamespace := "test" + g := NewWithT(t) + kcpTemplate := &controlplanev1.KubeadmControlPlaneTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kubeadmcontrolplanetemplate-test", + Namespace: testnamespace, + }, + Spec: controlplanev1.KubeadmControlPlaneTemplateSpec{ + Template: controlplanev1.KubeadmControlPlaneTemplateResource{ + Spec: controlplanev1.KubeadmControlPlaneTemplateResourceSpec{ + MachineTemplate: &controlplanev1.KubeadmControlPlaneTemplateMachineTemplate{ + NodeDrainTimeout: &metav1.Duration{Duration: time.Second}, + }, + }, + }, + }, + } + webhook := &KubeadmControlPlaneTemplate{} + warnings, err := webhook.ValidateCreate(ctx, kcpTemplate) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(warnings).To(BeEmpty()) + }) +} + +func TestKubeadmControlPlaneTemplateValidationFeatureGateDisabled(t *testing.T) { + // NOTE: ClusterTopology feature flag is disabled by default, thus preventing creation of KubeadmControlPlaneTemplate objects.
+ t.Run("create kubeadmcontrolplanetemplate should not pass if gate disabled and valid kubeadmcontrolplanetemplate", func(t *testing.T) { + testnamespace := "test" + g := NewWithT(t) + kcpTemplate := &controlplanev1.KubeadmControlPlaneTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kubeadmcontrolplanetemplate-test", + Namespace: testnamespace, + }, + Spec: controlplanev1.KubeadmControlPlaneTemplateSpec{ + Template: controlplanev1.KubeadmControlPlaneTemplateResource{ + Spec: controlplanev1.KubeadmControlPlaneTemplateResourceSpec{ + MachineTemplate: &controlplanev1.KubeadmControlPlaneTemplateMachineTemplate{ + NodeDrainTimeout: &metav1.Duration{Duration: time.Second}, + }, + }, + }, + }, + } + webhook := &KubeadmControlPlaneTemplate{} + warnings, err := webhook.ValidateCreate(ctx, kcpTemplate) + g.Expect(err).To(HaveOccurred()) + g.Expect(warnings).To(BeEmpty()) + }) +} + +func TestKubeadmControlPlaneTemplateValidationMetadata(t *testing.T) { + t.Run("create kubeadmcontrolplanetemplate should not pass if metadata is invalid", func(t *testing.T) { + g := NewWithT(t) + kcpTemplate := &controlplanev1.KubeadmControlPlaneTemplate{ + Spec: controlplanev1.KubeadmControlPlaneTemplateSpec{ + Template: controlplanev1.KubeadmControlPlaneTemplateResource{ + ObjectMeta: clusterv1.ObjectMeta{ + Labels: map[string]string{ + "foo": "$invalid-key", + "bar": strings.Repeat("a", 64) + "too-long-value", + "/invalid-key": "foo", + }, + Annotations: map[string]string{ + "/invalid-key": "foo", + }, + }, + Spec: controlplanev1.KubeadmControlPlaneTemplateResourceSpec{ + MachineTemplate: &controlplanev1.KubeadmControlPlaneTemplateMachineTemplate{ + ObjectMeta: clusterv1.ObjectMeta{ + Labels: map[string]string{ + "foo": "$invalid-key", + "bar": strings.Repeat("a", 64) + "too-long-value", + "/invalid-key": "foo", + }, + Annotations: map[string]string{ + "/invalid-key": "foo", + }, + }, + }, + }, + }, + }, + } + webhook := &KubeadmControlPlaneTemplate{} + warnings, err := webhook.ValidateCreate(ctx, kcpTemplate) + g.Expect(err).To(HaveOccurred()) + g.Expect(warnings).To(BeEmpty()) + }) +} + +func TestKubeadmControlPlaneTemplateUpdateValidation(t *testing.T) { + t.Run("update KubeadmControlPlaneTemplate should pass if only defaulted fields are different", func(t *testing.T) { + g := NewWithT(t) + oldKCPTemplate := &controlplanev1.KubeadmControlPlaneTemplate{ + Spec: controlplanev1.KubeadmControlPlaneTemplateSpec{ + Template: controlplanev1.KubeadmControlPlaneTemplateResource{ + Spec: controlplanev1.KubeadmControlPlaneTemplateResourceSpec{ + MachineTemplate: &controlplanev1.KubeadmControlPlaneTemplateMachineTemplate{ + NodeDrainTimeout: &metav1.Duration{Duration: time.Duration(10) * time.Minute}, + }, + }, + }, + }, + } + newKCPTemplate := &controlplanev1.KubeadmControlPlaneTemplate{ + Spec: controlplanev1.KubeadmControlPlaneTemplateSpec{ + Template: controlplanev1.KubeadmControlPlaneTemplateResource{ + Spec: controlplanev1.KubeadmControlPlaneTemplateResourceSpec{ + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + // Only this field is different, but defaulting will set it as well, so this should pass the immutability check. 
+ Format: bootstrapv1.CloudConfig, + }, + MachineTemplate: &controlplanev1.KubeadmControlPlaneTemplateMachineTemplate{ + NodeDrainTimeout: &metav1.Duration{Duration: time.Duration(10) * time.Minute}, + }, + }, + }, + }, + } + webhook := &KubeadmControlPlaneTemplate{} + warnings, err := webhook.ValidateUpdate(ctx, oldKCPTemplate, newKCPTemplate) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(warnings).To(BeEmpty()) + }) + t.Run("update kubeadmcontrolplanetemplate should not pass if fields are different", func(t *testing.T) { + g := NewWithT(t) + oldKCPTemplate := &controlplanev1.KubeadmControlPlaneTemplate{ + Spec: controlplanev1.KubeadmControlPlaneTemplateSpec{ + Template: controlplanev1.KubeadmControlPlaneTemplateResource{ + Spec: controlplanev1.KubeadmControlPlaneTemplateResourceSpec{ + MachineTemplate: &controlplanev1.KubeadmControlPlaneTemplateMachineTemplate{ + NodeDrainTimeout: &metav1.Duration{Duration: time.Duration(10) * time.Minute}, + }, + }, + }, + }, + } + newKCPTemplate := &controlplanev1.KubeadmControlPlaneTemplate{ + Spec: controlplanev1.KubeadmControlPlaneTemplateSpec{ + Template: controlplanev1.KubeadmControlPlaneTemplateResource{ + Spec: controlplanev1.KubeadmControlPlaneTemplateResourceSpec{ + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + // Defaulting will set this field as well. + Format: bootstrapv1.CloudConfig, + // This will fail the immutability check. + PreKubeadmCommands: []string{ + "new-cmd", + }, + }, + MachineTemplate: &controlplanev1.KubeadmControlPlaneTemplateMachineTemplate{ + NodeDrainTimeout: &metav1.Duration{Duration: time.Duration(10) * time.Minute}, + }, + }, + }, + }, + } + webhook := &KubeadmControlPlaneTemplate{} + warnings, err := webhook.ValidateUpdate(ctx, oldKCPTemplate, newKCPTemplate) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("KubeadmControlPlaneTemplate spec.template.spec field is immutable")) + g.Expect(warnings).To(BeEmpty()) + }) +} diff --git a/controlplane/kubeadm/internal/webhooks/scale.go b/controlplane/kubeadm/internal/webhooks/scale.go index 506b398afaff..4e90b23ac3fd 100644 --- a/controlplane/kubeadm/internal/webhooks/scale.go +++ b/controlplane/kubeadm/internal/webhooks/scale.go @@ -32,6 +32,8 @@ import ( ) func (v *ScaleValidator) SetupWebhookWithManager(mgr ctrl.Manager) error { + v.decoder = admission.NewDecoder(mgr.GetScheme()) + mgr.GetWebhookServer().Register("/validate-scale-controlplane-cluster-x-k8s-io-v1beta1-kubeadmcontrolplane", &webhook.Admission{ Handler: v, }) @@ -43,7 +45,7 @@ func (v *ScaleValidator) SetupWebhookWithManager(mgr ctrl.Manager) error { // ScaleValidator validates KCP for replicas. type ScaleValidator struct { Client client.Reader - decoder *admission.Decoder + decoder admission.Decoder } // Handle will validate for number of replicas. @@ -80,11 +82,3 @@ func (v *ScaleValidator) Handle(ctx context.Context, req admission.Request) admi return admission.Allowed("") } - -// InjectDecoder injects the decoder. -// ScaleValidator implements admission.DecoderInjector. -// A decoder will be automatically injected. 
-func (v *ScaleValidator) InjectDecoder(d *admission.Decoder) error { - v.decoder = d - return nil -} diff --git a/controlplane/kubeadm/internal/webhooks/scale_test.go b/controlplane/kubeadm/internal/webhooks/scale_test.go index f074dcf20a3c..3cddb1a2bd89 100644 --- a/controlplane/kubeadm/internal/webhooks/scale_test.go +++ b/controlplane/kubeadm/internal/webhooks/scale_test.go @@ -28,7 +28,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/uuid" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -62,7 +62,7 @@ func TestKubeadmControlPlaneValidateScale(t *testing.T) { }, NodeDrainTimeout: &metav1.Duration{Duration: time.Second}, }, - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), RolloutStrategy: &controlplanev1.RolloutStrategy{ Type: controlplanev1.RollingUpdateStrategyType, RollingUpdate: &controlplanev1.RollingUpdate{ @@ -121,7 +121,7 @@ func TestKubeadmControlPlaneValidateScale(t *testing.T) { }, NTP: &bootstrapv1.NTP{ Servers: []string{"test-server-1", "test-server-2"}, - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), }, }, Version: "v1.16.6", @@ -136,12 +136,12 @@ func TestKubeadmControlPlaneValidateScale(t *testing.T) { name string admissionRequest admission.Request expectRespAllowed bool - expectRespReason string + expectRespMessage string }{ { name: "should return error when trying to scale to zero", expectRespAllowed: false, - expectRespReason: "replicas cannot be 0", + expectRespMessage: "replicas cannot be 0", admissionRequest: admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{ UID: uuid.NewUUID(), Kind: metav1.GroupVersionKind{Group: "autoscaling", Version: "v1", Kind: "Scale"}, @@ -152,7 +152,7 @@ func TestKubeadmControlPlaneValidateScale(t *testing.T) { { name: "should return error when trying to scale to even number of replicas with managed etcd", expectRespAllowed: false, - expectRespReason: "replicas cannot be an even number when etcd is stacked", + expectRespMessage: "replicas cannot be an even number when etcd is stacked", admissionRequest: admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{ UID: uuid.NewUUID(), Kind: metav1.GroupVersionKind{Group: "autoscaling", Version: "v1", Kind: "Scale"}, @@ -163,7 +163,7 @@ func TestKubeadmControlPlaneValidateScale(t *testing.T) { { name: "should allow odd number of replicas with managed etcd", expectRespAllowed: true, - expectRespReason: "", + expectRespMessage: "", admissionRequest: admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{ UID: uuid.NewUUID(), Kind: metav1.GroupVersionKind{Group: "autoscaling", Version: "v1", Kind: "Scale"}, @@ -174,7 +174,7 @@ func TestKubeadmControlPlaneValidateScale(t *testing.T) { { name: "should allow even number of replicas with external etcd", expectRespAllowed: true, - expectRespReason: "", + expectRespMessage: "", admissionRequest: admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{ UID: uuid.NewUUID(), Kind: metav1.GroupVersionKind{Group: "autoscaling", Version: "v1", Kind: "Scale"}, @@ -185,7 +185,7 @@ func TestKubeadmControlPlaneValidateScale(t *testing.T) { { name: "should allow odd number of replicas with external etcd", expectRespAllowed: true, - expectRespReason: "", + expectRespMessage: "", admissionRequest: admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{ UID: uuid.NewUUID(), Kind: metav1.GroupVersionKind{Group: "autoscaling", 
Version: "v1", Kind: "Scale"}, @@ -198,18 +198,17 @@ func TestKubeadmControlPlaneValidateScale(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - decoder, _ := admission.NewDecoder(scheme) fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(kcpManagedEtcd, kcpExternalEtcd).Build() // Create the webhook and add the fakeClient as its client. scaleHandler := ScaleValidator{ Client: fakeClient, - decoder: decoder, + decoder: admission.NewDecoder(scheme), } resp := scaleHandler.Handle(context.Background(), tt.admissionRequest) g.Expect(resp.Allowed).Should(Equal(tt.expectRespAllowed)) - g.Expect(string(resp.Result.Reason)).Should(Equal(tt.expectRespReason)) + g.Expect(resp.Result.Message).Should(Equal(tt.expectRespMessage)) }) } } diff --git a/controlplane/kubeadm/internal/workload_cluster.go b/controlplane/kubeadm/internal/workload_cluster.go index 7ffacda66193..9034dd1e05e8 100644 --- a/controlplane/kubeadm/internal/workload_cluster.go +++ b/controlplane/kubeadm/internal/workload_cluster.go @@ -29,7 +29,7 @@ import ( "reflect" "time" - "github.com/blang/semver" + "github.com/blang/semver/v4" "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -105,13 +105,14 @@ type WorkloadCluster interface { // Upgrade related tasks. ReconcileKubeletRBACBinding(ctx context.Context, version semver.Version) error ReconcileKubeletRBACRole(ctx context.Context, version semver.Version) error - UpdateKubernetesVersionInKubeadmConfigMap(ctx context.Context, version semver.Version) error - UpdateImageRepositoryInKubeadmConfigMap(ctx context.Context, imageRepository string, version semver.Version) error - UpdateEtcdVersionInKubeadmConfigMap(ctx context.Context, imageRepository, imageTag string, version semver.Version) error - UpdateEtcdExtraArgsInKubeadmConfigMap(ctx context.Context, extraArgs map[string]string, version semver.Version) error - UpdateAPIServerInKubeadmConfigMap(ctx context.Context, apiServer bootstrapv1.APIServer, version semver.Version) error - UpdateControllerManagerInKubeadmConfigMap(ctx context.Context, controllerManager bootstrapv1.ControlPlaneComponent, version semver.Version) error - UpdateSchedulerInKubeadmConfigMap(ctx context.Context, scheduler bootstrapv1.ControlPlaneComponent, version semver.Version) error + UpdateKubernetesVersionInKubeadmConfigMap(version semver.Version) func(*bootstrapv1.ClusterConfiguration) + UpdateImageRepositoryInKubeadmConfigMap(imageRepository string) func(*bootstrapv1.ClusterConfiguration) + UpdateFeatureGatesInKubeadmConfigMap(featureGates map[string]bool) func(*bootstrapv1.ClusterConfiguration) + UpdateEtcdLocalInKubeadmConfigMap(localEtcd *bootstrapv1.LocalEtcd) func(*bootstrapv1.ClusterConfiguration) + UpdateEtcdExternalInKubeadmConfigMap(externalEtcd *bootstrapv1.ExternalEtcd) func(*bootstrapv1.ClusterConfiguration) + UpdateAPIServerInKubeadmConfigMap(apiServer bootstrapv1.APIServer) func(*bootstrapv1.ClusterConfiguration) + UpdateControllerManagerInKubeadmConfigMap(controllerManager bootstrapv1.ControlPlaneComponent) func(*bootstrapv1.ClusterConfiguration) + UpdateSchedulerInKubeadmConfigMap(scheduler bootstrapv1.ControlPlaneComponent) func(*bootstrapv1.ClusterConfiguration) UpdateKubeletConfigMap(ctx context.Context, version semver.Version) error UpdateKubeProxyImageInfo(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, version semver.Version) error UpdateCoreDNS(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, version semver.Version) error @@ -120,6 +121,8 @@ type 
WorkloadCluster interface { RemoveNodeFromKubeadmConfigMap(ctx context.Context, nodeName string, version semver.Version) error ForwardEtcdLeadership(ctx context.Context, machine *clusterv1.Machine, leaderCandidate *clusterv1.Machine) error AllowBootstrapTokensToGetNodes(ctx context.Context) error + AllowClusterAdminPermissions(ctx context.Context, version semver.Version) error + UpdateClusterConfiguration(ctx context.Context, version semver.Version, mutators ...func(*bootstrapv1.ClusterConfiguration)) error // State recovery tasks. ReconcileEtcdMembers(ctx context.Context, nodeNames []string, version semver.Version) ([]string, error) @@ -172,20 +175,30 @@ func (w *Workload) getConfigMap(ctx context.Context, configMap ctrlclient.Object } // UpdateImageRepositoryInKubeadmConfigMap updates the image repository in the kubeadm config map. -func (w *Workload) UpdateImageRepositoryInKubeadmConfigMap(ctx context.Context, imageRepository string, version semver.Version) error { - return w.updateClusterConfiguration(ctx, func(c *bootstrapv1.ClusterConfiguration) { +func (w *Workload) UpdateImageRepositoryInKubeadmConfigMap(imageRepository string) func(*bootstrapv1.ClusterConfiguration) { + return func(c *bootstrapv1.ClusterConfiguration) { if imageRepository == "" { return } + c.ImageRepository = imageRepository - }, version) + } +} + +// UpdateFeatureGatesInKubeadmConfigMap updates the feature gates in the kubeadm config map. +func (w *Workload) UpdateFeatureGatesInKubeadmConfigMap(featureGates map[string]bool) func(*bootstrapv1.ClusterConfiguration) { + return func(c *bootstrapv1.ClusterConfiguration) { + // Even if featureGates is nil, reset it to ClusterConfiguration + // to override any previously set feature gates. + c.FeatureGates = featureGates + } } // UpdateKubernetesVersionInKubeadmConfigMap updates the kubernetes version in the kubeadm config map. -func (w *Workload) UpdateKubernetesVersionInKubeadmConfigMap(ctx context.Context, version semver.Version) error { - return w.updateClusterConfiguration(ctx, func(c *bootstrapv1.ClusterConfiguration) { +func (w *Workload) UpdateKubernetesVersionInKubeadmConfigMap(version semver.Version) func(*bootstrapv1.ClusterConfiguration) { + return func(c *bootstrapv1.ClusterConfiguration) { c.KubernetesVersion = fmt.Sprintf("v%s", version.String()) - }, version) + } } // UpdateKubeletConfigMap will create a new kubelet-config-1.x config map for a new version of the kubelet. @@ -269,24 +282,24 @@ func (w *Workload) UpdateKubeletConfigMap(ctx context.Context, version semver.Ve } // UpdateAPIServerInKubeadmConfigMap updates api server configuration in kubeadm config map. -func (w *Workload) UpdateAPIServerInKubeadmConfigMap(ctx context.Context, apiServer bootstrapv1.APIServer, version semver.Version) error { - return w.updateClusterConfiguration(ctx, func(c *bootstrapv1.ClusterConfiguration) { +func (w *Workload) UpdateAPIServerInKubeadmConfigMap(apiServer bootstrapv1.APIServer) func(*bootstrapv1.ClusterConfiguration) { + return func(c *bootstrapv1.ClusterConfiguration) { c.APIServer = apiServer - }, version) + } } // UpdateControllerManagerInKubeadmConfigMap updates controller manager configuration in kubeadm config map. 
-func (w *Workload) UpdateControllerManagerInKubeadmConfigMap(ctx context.Context, controllerManager bootstrapv1.ControlPlaneComponent, version semver.Version) error { - return w.updateClusterConfiguration(ctx, func(c *bootstrapv1.ClusterConfiguration) { +func (w *Workload) UpdateControllerManagerInKubeadmConfigMap(controllerManager bootstrapv1.ControlPlaneComponent) func(*bootstrapv1.ClusterConfiguration) { + return func(c *bootstrapv1.ClusterConfiguration) { c.ControllerManager = controllerManager - }, version) + } } // UpdateSchedulerInKubeadmConfigMap updates scheduler configuration in kubeadm config map. -func (w *Workload) UpdateSchedulerInKubeadmConfigMap(ctx context.Context, scheduler bootstrapv1.ControlPlaneComponent, version semver.Version) error { - return w.updateClusterConfiguration(ctx, func(c *bootstrapv1.ClusterConfiguration) { +func (w *Workload) UpdateSchedulerInKubeadmConfigMap(scheduler bootstrapv1.ControlPlaneComponent) func(*bootstrapv1.ClusterConfiguration) { + return func(c *bootstrapv1.ClusterConfiguration) { c.Scheduler = scheduler - }, version) + } } // RemoveMachineFromKubeadmConfigMap removes the entry for the machine from the kubeadm configmap. @@ -349,11 +362,11 @@ func (w *Workload) updateClusterStatus(ctx context.Context, mutator func(status }) } -// updateClusterConfiguration gets the ClusterConfiguration kubeadm-config ConfigMap, converts it to the +// UpdateClusterConfiguration gets the ClusterConfiguration kubeadm-config ConfigMap, converts it to the // Cluster API representation, and then applies a mutation func; if changes are detected, the // data are converted back into the Kubeadm API version in use for the target Kubernetes version and the // kubeadm-config ConfigMap updated. -func (w *Workload) updateClusterConfiguration(ctx context.Context, mutator func(*bootstrapv1.ClusterConfiguration), version semver.Version) error { +func (w *Workload) UpdateClusterConfiguration(ctx context.Context, version semver.Version, mutators ...func(*bootstrapv1.ClusterConfiguration)) error { return retry.RetryOnConflict(retry.DefaultBackoff, func() error { key := ctrlclient.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem} configMap, err := w.getConfigMap(ctx, key) @@ -372,7 +385,9 @@ func (w *Workload) updateClusterConfiguration(ctx context.Context, mutator func( } updatedObj := currentObj.DeepCopy() - mutator(updatedObj) + for i := range mutators { + mutators[i](updatedObj) + } if !reflect.DeepEqual(currentObj, updatedObj) { updatedData, err := kubeadmtypes.MarshalClusterConfigurationForVersion(updatedObj, version) @@ -381,7 +396,7 @@ func (w *Workload) updateClusterConfiguration(ctx context.Context, mutator func( } configMap.Data[clusterConfigurationKey] = updatedData if err := w.Client.Update(ctx, configMap); err != nil { - return errors.Wrap(err, "failed to upgrade the kubeadmConfigMap") + return errors.Wrap(err, "failed to upgrade cluster configuration in the kubeadmConfigMap") } } return nil @@ -489,11 +504,7 @@ func calculateAPIServerPort(config *bootstrapv1.KubeadmConfig) int32 { return 6443 } -func generateClientCert(caCertEncoded, caKeyEncoded []byte) (tls.Certificate, error) { - privKey, err := certs.NewPrivateKey() - if err != nil { - return tls.Certificate{}, err - } +func generateClientCert(caCertEncoded, caKeyEncoded []byte, clientKey *rsa.PrivateKey) (tls.Certificate, error) { caCert, err := certs.DecodeCertPEM(caCertEncoded) if err != nil { return tls.Certificate{}, err @@ -502,11 +513,11 @@ func generateClientCert(caCertEncoded, 
caKeyEncoded []byte) (tls.Certificate, er if err != nil { return tls.Certificate{}, err } - x509Cert, err := newClientCert(caCert, privKey, caKey) + x509Cert, err := newClientCert(caCert, clientKey, caKey) if err != nil { return tls.Certificate{}, err } - return tls.X509KeyPair(certs.EncodeCertPEM(x509Cert), certs.EncodePrivateKeyPEM(privKey)) + return tls.X509KeyPair(certs.EncodeCertPEM(x509Cert), certs.EncodePrivateKeyPEM(clientKey)) } func newClientCert(caCert *x509.Certificate, key *rsa.PrivateKey, caKey crypto.Signer) (*x509.Certificate, error) { diff --git a/controlplane/kubeadm/internal/workload_cluster_conditions.go b/controlplane/kubeadm/internal/workload_cluster_conditions.go index 610a6f6929b8..c54b1cdf617d 100644 --- a/controlplane/kubeadm/internal/workload_cluster_conditions.go +++ b/controlplane/kubeadm/internal/workload_cluster_conditions.go @@ -121,6 +121,10 @@ func (w *Workload) updateManagedEtcdConditions(ctx context.Context, controlPlane // Retrieve the member and check for alarms. // NB. The member for this node always exists given forFirstAvailableNode(node) used above member := etcdutil.MemberForName(currentMembers, node.Name) + if member == nil { + conditions.MarkFalse(machine, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "etcd member reports the cluster is composed of members %s, but the member itself (%s) is not included", etcdutil.MemberNames(currentMembers), node.Name) + continue + } if len(member.Alarms) > 0 { alarmList := []string{} for _, alarm := range member.Alarms { diff --git a/controlplane/kubeadm/internal/workload_cluster_coredns.go b/controlplane/kubeadm/internal/workload_cluster_coredns.go index 9ebf7eda9950..deb5d712d708 100644 --- a/controlplane/kubeadm/internal/workload_cluster_coredns.go +++ b/controlplane/kubeadm/internal/workload_cluster_coredns.go @@ -22,7 +22,7 @@ import ( "reflect" "strings" - "github.com/blang/semver" + "github.com/blang/semver/v4" "github.com/coredns/corefile-migration/migration" "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" @@ -145,7 +145,7 @@ func (w *Workload) UpdateCoreDNS(ctx context.Context, kcp *controlplanev1.Kubead } // Perform the upgrade. - if err := w.updateCoreDNSImageInfoInKubeadmConfigMap(ctx, &clusterConfig.DNS, version); err != nil { + if err := w.UpdateClusterConfiguration(ctx, version, w.updateCoreDNSImageInfoInKubeadmConfigMap(&clusterConfig.DNS)); err != nil { return err } if err := w.updateCoreDNSCorefile(ctx, info); err != nil { @@ -270,11 +270,11 @@ func (w *Workload) updateCoreDNSDeployment(ctx context.Context, info *coreDNSInf } // updateCoreDNSImageInfoInKubeadmConfigMap updates the CoreDNS image info in the kubeadm config map. -func (w *Workload) updateCoreDNSImageInfoInKubeadmConfigMap(ctx context.Context, dns *bootstrapv1.DNS, version semver.Version) error { - return w.updateClusterConfiguration(ctx, func(c *bootstrapv1.ClusterConfiguration) { +func (w *Workload) updateCoreDNSImageInfoInKubeadmConfigMap(dns *bootstrapv1.DNS) func(*bootstrapv1.ClusterConfiguration) { + return func(c *bootstrapv1.ClusterConfiguration) { c.DNS.ImageRepository = dns.ImageRepository c.DNS.ImageTag = dns.ImageTag - }, version) + } } // updateCoreDNSClusterRole updates the CoreDNS ClusterRole when necessary. 
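// [Illustrative sketch added in review; not part of the diff.] After this refactor the
// per-field Update*InKubeadmConfigMap helpers return mutation funcs instead of writing the
// ConfigMap themselves, so a caller can batch several changes into a single exported
// UpdateClusterConfiguration call: one read/mutate/write protected by one conflict retry,
// rather than one round trip per field. A hypothetical caller might look like this
// (parsedVersion, imageRepository, and etcdLocal are assumed caller-side values):
//
//	if err := w.UpdateClusterConfiguration(ctx, parsedVersion,
//		w.UpdateKubernetesVersionInKubeadmConfigMap(parsedVersion),
//		w.UpdateImageRepositoryInKubeadmConfigMap(imageRepository),
//		w.UpdateEtcdLocalInKubeadmConfigMap(etcdLocal),
//	); err != nil {
//		return errors.Wrap(err, "failed to update the kubeadm ClusterConfiguration")
//	}
//
// Because UpdateClusterConfiguration only writes when reflect.DeepEqual detects a change,
// mutators that find nothing to update leave the ConfigMap untouched.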
diff --git a/controlplane/kubeadm/internal/workload_cluster_coredns_test.go b/controlplane/kubeadm/internal/workload_cluster_coredns_test.go index bac6124e733b..fd68d0a1595e 100644 --- a/controlplane/kubeadm/internal/workload_cluster_coredns_test.go +++ b/controlplane/kubeadm/internal/workload_cluster_coredns_test.go @@ -19,7 +19,7 @@ package internal import ( "testing" - "github.com/blang/semver" + "github.com/blang/semver/v4" "github.com/google/go-cmp/cmp" . "github.com/onsi/gomega" "github.com/pkg/errors" @@ -32,7 +32,7 @@ import ( bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" - "sigs.k8s.io/cluster-api/util/yaml" + utilyaml "sigs.k8s.io/cluster-api/util/yaml" ) func TestUpdateCoreDNS(t *testing.T) { @@ -124,7 +124,7 @@ func TestUpdateCoreDNS(t *testing.T) { Namespace: metav1.NamespaceSystem, }, Data: map[string]string{ - "ClusterConfiguration": yaml.Raw(` + "ClusterConfiguration": utilyaml.Raw(` apiServer: apiVersion: kubeadm.k8s.io/v1beta2 dns: @@ -140,7 +140,7 @@ func TestUpdateCoreDNS(t *testing.T) { Namespace: metav1.NamespaceSystem, }, Data: map[string]string{ - "ClusterConfiguration": yaml.Raw(` + "ClusterConfiguration": utilyaml.Raw(` apiServer: apiVersion: kubeadm.k8s.io/v1beta2 dns: @@ -567,7 +567,7 @@ func TestUpdateCoreDNS(t *testing.T) { g.Eventually(func() []rbacv1.PolicyRule { g.Expect(env.Get(ctx, client.ObjectKey{Name: coreDNSClusterRoleName}, &actualClusterRole)).To(Succeed()) return actualClusterRole.Rules - }, "5s").Should(Equal(tt.expectRules)) + }, "5s").Should(BeComparableTo(tt.expectRules)) } } }) @@ -760,7 +760,7 @@ func TestUpdateCoreDNSClusterRole(t *testing.T) { var actualClusterRole rbacv1.ClusterRole g.Expect(fakeClient.Get(ctx, client.ObjectKey{Name: coreDNSClusterRoleName, Namespace: metav1.NamespaceSystem}, &actualClusterRole)).To(Succeed()) - g.Expect(actualClusterRole.Rules).To(Equal(tt.expectCoreDNSPolicyRules)) + g.Expect(actualClusterRole.Rules).To(BeComparableTo(tt.expectCoreDNSPolicyRules)) }) } } @@ -1369,7 +1369,8 @@ func TestGetCoreDNSInfo(t *testing.T) { expectErr: true, }, } - for _, tt := range tests { + for i := range tests { + tt := tests[i] t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) fakeClient := fake.NewClientBuilder().WithObjects(tt.objs...).Build() @@ -1394,7 +1395,7 @@ func TestGetCoreDNSInfo(t *testing.T) { tt.expectedInfo.Corefile = expectedCorefile tt.expectedInfo.Deployment = actualDepl - g.Expect(actualInfo).To(Equal(&tt.expectedInfo)) + g.Expect(actualInfo).To(BeComparableTo(&tt.expectedInfo)) }) } }) @@ -1409,7 +1410,7 @@ func TestUpdateCoreDNSImageInfoInKubeadmConfigMap(t *testing.T) { }{ { name: "it should set the DNS image config", - clusterConfigurationData: yaml.Raw(` + clusterConfigurationData: utilyaml.Raw(` apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration `), @@ -1419,7 +1420,7 @@ func TestUpdateCoreDNSImageInfoInKubeadmConfigMap(t *testing.T) { ImageTag: "v1.2.3", }, }, - wantClusterConfiguration: yaml.Raw(` + wantClusterConfiguration: utilyaml.Raw(` apiServer: {} apiVersion: kubeadm.k8s.io/v1beta2 controllerManager: {} @@ -1433,7 +1434,8 @@ func TestUpdateCoreDNSImageInfoInKubeadmConfigMap(t *testing.T) { `), }, } - for _, tt := range tests { + for i := range tests { + tt := tests[i] t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{ @@ -1449,7 +1451,7 @@ func TestUpdateCoreDNSImageInfoInKubeadmConfigMap(t *testing.T) { w := 
&Workload{ Client: fakeClient, } - err := w.updateCoreDNSImageInfoInKubeadmConfigMap(ctx, &tt.newDNS, semver.MustParse("1.19.1")) + err := w.UpdateClusterConfiguration(ctx, semver.MustParse("1.19.1"), w.updateCoreDNSImageInfoInKubeadmConfigMap(&tt.newDNS)) g.Expect(err).ToNot(HaveOccurred()) var actualConfig corev1.ConfigMap @@ -1676,7 +1678,7 @@ func TestPatchCoreDNSDeploymentTolerations(t *testing.T) { patchCoreDNSDeploymentTolerations(d, tt.kubernetesVersion) - g.Expect(d.Spec.Template.Spec.Tolerations).To(Equal(tt.expectedTolerations)) + g.Expect(d.Spec.Template.Spec.Tolerations).To(BeComparableTo(tt.expectedTolerations)) }) } } diff --git a/controlplane/kubeadm/internal/workload_cluster_etcd.go b/controlplane/kubeadm/internal/workload_cluster_etcd.go index 3b3662a29f01..48c06bc3f567 100644 --- a/controlplane/kubeadm/internal/workload_cluster_etcd.go +++ b/controlplane/kubeadm/internal/workload_cluster_etcd.go @@ -19,7 +19,7 @@ package internal import ( "context" - "github.com/blang/semver" + "github.com/blang/semver/v4" "github.com/pkg/errors" kerrors "k8s.io/apimachinery/pkg/util/errors" @@ -92,23 +92,22 @@ loopmembers: return removedMembers, errs } -// UpdateEtcdVersionInKubeadmConfigMap sets the imageRepository or the imageTag or both in the kubeadm config map. -func (w *Workload) UpdateEtcdVersionInKubeadmConfigMap(ctx context.Context, imageRepository, imageTag string, version semver.Version) error { - return w.updateClusterConfiguration(ctx, func(c *bootstrapv1.ClusterConfiguration) { +// UpdateEtcdLocalInKubeadmConfigMap sets etcd local configuration in the kubeadm config map. +func (w *Workload) UpdateEtcdLocalInKubeadmConfigMap(etcdLocal *bootstrapv1.LocalEtcd) func(*bootstrapv1.ClusterConfiguration) { + return func(c *bootstrapv1.ClusterConfiguration) { if c.Etcd.Local != nil { - c.Etcd.Local.ImageRepository = imageRepository - c.Etcd.Local.ImageTag = imageTag + c.Etcd.Local = etcdLocal } - }, version) + } } -// UpdateEtcdExtraArgsInKubeadmConfigMap sets extraArgs in the kubeadm config map. -func (w *Workload) UpdateEtcdExtraArgsInKubeadmConfigMap(ctx context.Context, extraArgs map[string]string, version semver.Version) error { - return w.updateClusterConfiguration(ctx, func(c *bootstrapv1.ClusterConfiguration) { - if c.Etcd.Local != nil { - c.Etcd.Local.ExtraArgs = extraArgs +// UpdateEtcdExternalInKubeadmConfigMap sets etcd external configuration in the kubeadm config map. +func (w *Workload) UpdateEtcdExternalInKubeadmConfigMap(etcdExternal *bootstrapv1.ExternalEtcd) func(*bootstrapv1.ClusterConfiguration) { + return func(c *bootstrapv1.ClusterConfiguration) { + if c.Etcd.External != nil { + c.Etcd.External = etcdExternal } - }, version) + } } // RemoveEtcdMemberForMachine removes the etcd member from the target cluster's etcd cluster. diff --git a/controlplane/kubeadm/internal/workload_cluster_etcd_test.go b/controlplane/kubeadm/internal/workload_cluster_etcd_test.go index 9972cd283b69..3c8f8736ae0d 100644 --- a/controlplane/kubeadm/internal/workload_cluster_etcd_test.go +++ b/controlplane/kubeadm/internal/workload_cluster_etcd_test.go @@ -21,7 +21,7 @@ import ( "errors" "testing" - "github.com/blang/semver" + "github.com/blang/semver/v4" "github.com/google/go-cmp/cmp" . 
"github.com/onsi/gomega" pb "go.etcd.io/etcd/api/v3/etcdserverpb" @@ -32,58 +32,69 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd" fake2 "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd/fake" - "sigs.k8s.io/cluster-api/util/yaml" + utilyaml "sigs.k8s.io/cluster-api/util/yaml" ) -func TestUpdateEtcdVersionInKubeadmConfigMap(t *testing.T) { +func TestUpdateEtcdExternalInKubeadmConfigMap(t *testing.T) { tests := []struct { name string clusterConfigurationData string - newImageRepository string - newImageTag string + externalEtcd *bootstrapv1.ExternalEtcd wantClusterConfiguration string }{ { - name: "it should set etcd version when local etcd", - clusterConfigurationData: yaml.Raw(` + name: "it should set external etcd configuration with external etcd", + clusterConfigurationData: utilyaml.Raw(` apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration etcd: - local: {} + external: {} `), - newImageRepository: "example.com/k8s", - newImageTag: "v1.6.0", - wantClusterConfiguration: yaml.Raw(` + externalEtcd: &bootstrapv1.ExternalEtcd{ + Endpoints: []string{"1.2.3.4"}, + CAFile: "/tmp/ca_file.pem", + CertFile: "/tmp/cert_file.crt", + KeyFile: "/tmp/key_file.key", + }, + wantClusterConfiguration: utilyaml.Raw(` apiServer: {} apiVersion: kubeadm.k8s.io/v1beta2 controllerManager: {} dns: {} etcd: - local: - imageRepository: example.com/k8s - imageTag: v1.6.0 + external: + caFile: /tmp/ca_file.pem + certFile: /tmp/cert_file.crt + endpoints: + - 1.2.3.4 + keyFile: /tmp/key_file.key kind: ClusterConfiguration networking: {} scheduler: {} `), }, { - name: "no op when external etcd", - clusterConfigurationData: yaml.Raw(` + name: "no op when local etcd configuration already exists", + clusterConfigurationData: utilyaml.Raw(` apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration etcd: - external: {} + local: {} `), - newImageRepository: "example.com/k8s", - newImageTag: "v1.6.0", - wantClusterConfiguration: yaml.Raw(` + externalEtcd: &bootstrapv1.ExternalEtcd{ + Endpoints: []string{"1.2.3.4"}, + CAFile: "/tmp/ca_file.pem", + CertFile: "/tmp/cert_file.crt", + KeyFile: "/tmp/key_file.key", + }, + wantClusterConfiguration: utilyaml.Raw(` apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration etcd: - external: {} + local: {} `), }, } @@ -104,7 +115,7 @@ func TestUpdateEtcdVersionInKubeadmConfigMap(t *testing.T) { w := &Workload{ Client: fakeClient, } - err := w.UpdateEtcdVersionInKubeadmConfigMap(ctx, tt.newImageRepository, tt.newImageTag, semver.MustParse("1.19.1")) + err := w.UpdateClusterConfiguration(ctx, semver.MustParse("1.19.1"), w.UpdateEtcdExternalInKubeadmConfigMap(tt.externalEtcd)) g.Expect(err).ToNot(HaveOccurred()) var actualConfig corev1.ConfigMap @@ -118,25 +129,31 @@ func TestUpdateEtcdVersionInKubeadmConfigMap(t *testing.T) { } } -func TestUpdateEtcdExtraArgsInKubeadmConfigMap(t *testing.T) { +func TestUpdateEtcdLocalInKubeadmConfigMap(t *testing.T) { tests := []struct { name string clusterConfigurationData string - newExtraArgs map[string]string + localEtcd *bootstrapv1.LocalEtcd wantClusterConfiguration string }{ { - name: "it should set etcd extraArgs when local etcd", - clusterConfigurationData: yaml.Raw(` + name: "it should set local etcd configuration with local etcd", + clusterConfigurationData: utilyaml.Raw(` apiVersion: kubeadm.k8s.io/v1beta2 kind: 
ClusterConfiguration etcd: local: {} `), - newExtraArgs: map[string]string{ - "foo": "bar", + localEtcd: &bootstrapv1.LocalEtcd{ + ImageMeta: bootstrapv1.ImageMeta{ + ImageRepository: "example.com/k8s", + ImageTag: "v1.6.0", + }, + ExtraArgs: map[string]string{ + "foo": "bar", + }, }, - wantClusterConfiguration: yaml.Raw(` + wantClusterConfiguration: utilyaml.Raw(` apiServer: {} apiVersion: kubeadm.k8s.io/v1beta2 controllerManager: {} @@ -145,23 +162,31 @@ func TestUpdateEtcdExtraArgsInKubeadmConfigMap(t *testing.T) { local: extraArgs: foo: bar + imageRepository: example.com/k8s + imageTag: v1.6.0 kind: ClusterConfiguration networking: {} scheduler: {} `), }, { - name: "no op when external etcd", - clusterConfigurationData: yaml.Raw(` + name: "no op when external etcd configuration already exists", + clusterConfigurationData: utilyaml.Raw(` apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration etcd: external: {} `), - newExtraArgs: map[string]string{ - "foo": "bar", + localEtcd: &bootstrapv1.LocalEtcd{ + ImageMeta: bootstrapv1.ImageMeta{ + ImageRepository: "example.com/k8s", + ImageTag: "v1.6.0", + }, + ExtraArgs: map[string]string{ + "foo": "bar", + }, }, - wantClusterConfiguration: yaml.Raw(` + wantClusterConfiguration: utilyaml.Raw(` apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration etcd: @@ -186,7 +211,7 @@ func TestUpdateEtcdExtraArgsInKubeadmConfigMap(t *testing.T) { w := &Workload{ Client: fakeClient, } - err := w.UpdateEtcdExtraArgsInKubeadmConfigMap(ctx, tt.newExtraArgs, semver.MustParse("1.19.1")) + err := w.UpdateClusterConfiguration(ctx, semver.MustParse("1.19.1"), w.UpdateEtcdLocalInKubeadmConfigMap(tt.localEtcd)) g.Expect(err).ToNot(HaveOccurred()) var actualConfig corev1.ConfigMap @@ -534,7 +559,7 @@ func TestReconcileEtcdMembers(t *testing.T) { Namespace: metav1.NamespaceSystem, }, Data: map[string]string{ - clusterStatusKey: yaml.Raw(` + clusterStatusKey: utilyaml.Raw(` apiEndpoints: ip-10-0-0-1.ec2.internal: advertiseAddress: 10.0.0.1 @@ -610,7 +635,7 @@ func TestReconcileEtcdMembers(t *testing.T) { client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, &actualConfig, )).To(Succeed()) - expectedOutput := yaml.Raw(` + expectedOutput := utilyaml.Raw(` apiEndpoints: ip-10-0-0-1.ec2.internal: advertiseAddress: 10.0.0.1 @@ -702,7 +727,7 @@ func TestRemoveNodeFromKubeadmConfigMap(t *testing.T) { { name: "removes the api endpoint", apiEndpoint: "ip-10-0-0-2.ec2.internal", - clusterStatusData: yaml.Raw(` + clusterStatusData: utilyaml.Raw(` apiEndpoints: ip-10-0-0-1.ec2.internal: advertiseAddress: 10.0.0.1 @@ -713,7 +738,7 @@ func TestRemoveNodeFromKubeadmConfigMap(t *testing.T) { apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterStatus `), - wantClusterStatus: yaml.Raw(` + wantClusterStatus: utilyaml.Raw(` apiEndpoints: ip-10-0-0-1.ec2.internal: advertiseAddress: 10.0.0.1 @@ -725,7 +750,7 @@ func TestRemoveNodeFromKubeadmConfigMap(t *testing.T) { { name: "no op if the api endpoint does not exist", apiEndpoint: "ip-10-0-0-2.ec2.internal", - clusterStatusData: yaml.Raw(` + clusterStatusData: utilyaml.Raw(` apiEndpoints: ip-10-0-0-1.ec2.internal: advertiseAddress: 10.0.0.1 @@ -733,7 +758,7 @@ func TestRemoveNodeFromKubeadmConfigMap(t *testing.T) { apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterStatus `), - wantClusterStatus: yaml.Raw(` + wantClusterStatus: utilyaml.Raw(` apiEndpoints: ip-10-0-0-1.ec2.internal: advertiseAddress: 10.0.0.1 diff --git a/controlplane/kubeadm/internal/workload_cluster_rbac.go 
b/controlplane/kubeadm/internal/workload_cluster_rbac.go index d714c6329c5f..8fc6c74c38ba 100644 --- a/controlplane/kubeadm/internal/workload_cluster_rbac.go +++ b/controlplane/kubeadm/internal/workload_cluster_rbac.go @@ -20,12 +20,14 @@ import ( "context" "fmt" - "github.com/blang/semver" + "github.com/blang/semver/v4" "github.com/pkg/errors" rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" + + "sigs.k8s.io/cluster-api/util/version" ) const ( @@ -35,6 +37,11 @@ const ( // GetNodesClusterRoleName defines the name of the ClusterRole and ClusterRoleBinding to get nodes. GetNodesClusterRoleName = "kubeadm:get-nodes" + // ClusterAdminsGroupAndClusterRoleBinding is the name of the Group used for kubeadm generated cluster + // admin credentials and the name of the ClusterRoleBinding that binds the same Group to the "cluster-admin" + // built-in ClusterRole. + ClusterAdminsGroupAndClusterRoleBinding = "kubeadm:cluster-admins" + // NodesGroup defines the well-known group for all nodes. NodesGroup = "system:nodes" @@ -66,6 +73,33 @@ func (w *Workload) EnsureResource(ctx context.Context, obj client.Object) error return nil } +// AllowClusterAdminPermissions creates ClusterRoleBinding rules to use the kubeadm:cluster-admins Cluster Role created in Kubeadm v1.29. +func (w *Workload) AllowClusterAdminPermissions(ctx context.Context, targetVersion semver.Version) error { + // We intentionally only parse major/minor/patch so that the subsequent code + // also already applies to pre-release versions of new releases. + // Do nothing for Kubernetes < 1.29. + if version.Compare(targetVersion, semver.Version{Major: 1, Minor: 29, Patch: 0}, version.WithoutPreReleases()) < 0 { + return nil + } + return w.EnsureResource(ctx, &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: ClusterAdminsGroupAndClusterRoleBinding, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: "cluster-admin", + }, + Subjects: []rbacv1.Subject{ + { + Kind: rbacv1.GroupKind, + Name: ClusterAdminsGroupAndClusterRoleBinding, + }, + }, + }, + ) +} + // AllowBootstrapTokensToGetNodes creates RBAC rules to allow Node Bootstrap Tokens to list nodes. func (w *Workload) AllowBootstrapTokensToGetNodes(ctx context.Context) error { if err := w.EnsureResource(ctx, &rbacv1.ClusterRole{ diff --git a/controlplane/kubeadm/internal/workload_cluster_rbac_test.go b/controlplane/kubeadm/internal/workload_cluster_rbac_test.go index f4f93e3d6451..666f2829063d 100644 --- a/controlplane/kubeadm/internal/workload_cluster_rbac_test.go +++ b/controlplane/kubeadm/internal/workload_cluster_rbac_test.go @@ -20,7 +20,7 @@ import ( "errors" "testing" - "github.com/blang/semver" + "github.com/blang/semver/v4" . 
"github.com/onsi/gomega" rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -122,7 +122,7 @@ func TestCluster_ReconcileKubeletRBACBinding_NoError(t *testing.T) { // Role exists g.Expect(tt.client.Get(ctx, tt.want.role, r)).To(Succeed()) // Role ensure grants for the KubeletConfig config map - g.Expect(r.Rules).To(Equal([]rbacv1.PolicyRule{ + g.Expect(r.Rules).To(BeComparableTo([]rbacv1.PolicyRule{ { Verbs: []string{"get"}, APIGroups: []string{""}, diff --git a/controlplane/kubeadm/internal/workload_cluster_test.go b/controlplane/kubeadm/internal/workload_cluster_test.go index 63bbea586df2..eb475a89ca21 100644 --- a/controlplane/kubeadm/internal/workload_cluster_test.go +++ b/controlplane/kubeadm/internal/workload_cluster_test.go @@ -21,7 +21,7 @@ import ( "errors" "testing" - "github.com/blang/semver" + "github.com/blang/semver/v4" "github.com/google/go-cmp/cmp" . "github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" @@ -30,12 +30,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/yaml" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/util/version" - "sigs.k8s.io/cluster-api/util/yaml" + utilyaml "sigs.k8s.io/cluster-api/util/yaml" ) func TestGetControlPlaneNodes(t *testing.T) { @@ -218,7 +219,8 @@ func TestUpdateKubeProxyImageInfo(t *testing.T) { }, } - for _, tt := range tests { + for i := range tests { + tt := tests[i] t.Run(tt.name, func(t *testing.T) { gs := NewWithT(t) @@ -235,11 +237,11 @@ func TestUpdateKubeProxyImageInfo(t *testing.T) { if tt.expectErr { gs.Expect(err).To(HaveOccurred()) } else { - gs.Expect(err).NotTo(HaveOccurred()) + gs.Expect(err).ToNot(HaveOccurred()) } proxyImage, err := getProxyImageInfo(ctx, w.Client) - gs.Expect(err).NotTo(HaveOccurred()) + gs.Expect(err).ToNot(HaveOccurred()) if tt.expectImage != "" { gs.Expect(proxyImage).To(Equal(tt.expectImage)) } @@ -261,7 +263,7 @@ func TestRemoveMachineFromKubeadmConfigMap(t *testing.T) { Namespace: metav1.NamespaceSystem, }, Data: map[string]string{ - clusterStatusKey: yaml.Raw(` + clusterStatusKey: utilyaml.Raw(` apiEndpoints: ip-10-0-0-1.ec2.internal: advertiseAddress: 10.0.0.1 @@ -326,7 +328,7 @@ func TestRemoveMachineFromKubeadmConfigMap(t *testing.T) { machine: machine, objs: []client.Object{kubeadmConfig}, expectErr: false, - expectedEndpoints: yaml.Raw(` + expectedEndpoints: utilyaml.Raw(` apiEndpoints: ip-10-0-0-2.ec2.internal: advertiseAddress: 10.0.0.2 @@ -396,7 +398,7 @@ func TestUpdateKubeletConfigMap(t *testing.T) { ResourceVersion: "some-resource-version", }, Data: map[string]string{ - kubeletConfigKey: yaml.Raw(` + kubeletConfigKey: utilyaml.Raw(` apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration foo: bar @@ -415,7 +417,7 @@ func TestUpdateKubeletConfigMap(t *testing.T) { ResourceVersion: "some-resource-version", }, Data: map[string]string{ - kubeletConfigKey: yaml.Raw(` + kubeletConfigKey: utilyaml.Raw(` apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration foo: bar @@ -434,7 +436,7 @@ func TestUpdateKubeletConfigMap(t *testing.T) { ResourceVersion: "some-resource-version", }, Data: map[string]string{ - kubeletConfigKey: yaml.Raw(` + kubeletConfigKey: utilyaml.Raw(` apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration foo: bar @@ -452,7 +454,7 
@@ func TestUpdateKubeletConfigMap(t *testing.T) { ResourceVersion: "some-resource-version", }, Data: map[string]string{ - kubeletConfigKey: yaml.Raw(` + kubeletConfigKey: utilyaml.Raw(` apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration foo: bar @@ -472,7 +474,7 @@ func TestUpdateKubeletConfigMap(t *testing.T) { ResourceVersion: "some-resource-version", }, Data: map[string]string{ - kubeletConfigKey: yaml.Raw(` + kubeletConfigKey: utilyaml.Raw(` apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration cgroupDriver: cgroupfs @@ -575,21 +577,21 @@ func TestUpdateUpdateClusterConfigurationInKubeadmConfigMap(t *testing.T) { Namespace: metav1.NamespaceSystem, }, Data: map[string]string{ - clusterConfigurationKey: yaml.Raw(` + clusterConfigurationKey: utilyaml.Raw(` apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration kubernetesVersion: v1.16.1 `), }, }}, - mutator: func(c *bootstrapv1.ClusterConfiguration) {}, + mutator: func(*bootstrapv1.ClusterConfiguration) {}, wantConfigMap: &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem, }, Data: map[string]string{ - clusterConfigurationKey: yaml.Raw(` + clusterConfigurationKey: utilyaml.Raw(` apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration kubernetesVersion: v1.16.1 @@ -606,7 +608,7 @@ func TestUpdateUpdateClusterConfigurationInKubeadmConfigMap(t *testing.T) { Namespace: metav1.NamespaceSystem, }, Data: map[string]string{ - clusterConfigurationKey: yaml.Raw(` + clusterConfigurationKey: utilyaml.Raw(` apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration kubernetesVersion: v1.16.1 @@ -622,7 +624,7 @@ func TestUpdateUpdateClusterConfigurationInKubeadmConfigMap(t *testing.T) { Namespace: metav1.NamespaceSystem, }, Data: map[string]string{ - clusterConfigurationKey: yaml.Raw(` + clusterConfigurationKey: utilyaml.Raw(` apiServer: {} apiVersion: kubeadm.k8s.io/v1beta2 controllerManager: {} @@ -638,22 +640,22 @@ func TestUpdateUpdateClusterConfigurationInKubeadmConfigMap(t *testing.T) { }, { name: "converts kubeadm api version during mutation if required", - version: semver.MustParse("1.17.2"), + version: semver.MustParse("1.28.0"), objs: []client.Object{&corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem, }, Data: map[string]string{ - clusterConfigurationKey: yaml.Raw(` - apiVersion: kubeadm.k8s.io/v1beta1 + clusterConfigurationKey: utilyaml.Raw(` + apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration kubernetesVersion: v1.16.1 `), }, }}, mutator: func(c *bootstrapv1.ClusterConfiguration) { - c.KubernetesVersion = "v1.17.2" + c.KubernetesVersion = "v1.28.0" }, wantConfigMap: &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ @@ -661,14 +663,14 @@ func TestUpdateUpdateClusterConfigurationInKubeadmConfigMap(t *testing.T) { Namespace: metav1.NamespaceSystem, }, Data: map[string]string{ - clusterConfigurationKey: yaml.Raw(` + clusterConfigurationKey: utilyaml.Raw(` apiServer: {} - apiVersion: kubeadm.k8s.io/v1beta2 + apiVersion: kubeadm.k8s.io/v1beta3 controllerManager: {} dns: {} etcd: {} kind: ClusterConfiguration - kubernetesVersion: v1.17.2 + kubernetesVersion: v1.28.0 networking: {} scheduler: {} `), @@ -685,7 +687,7 @@ func TestUpdateUpdateClusterConfigurationInKubeadmConfigMap(t *testing.T) { w := &Workload{ Client: fakeClient, } - err := w.updateClusterConfiguration(ctx, tt.mutator, tt.version) + err := w.UpdateClusterConfiguration(ctx, tt.version, tt.mutator) if tt.wantErr 
{ g.Expect(err).To(HaveOccurred()) return @@ -753,7 +755,7 @@ func TestUpdateUpdateClusterStatusInKubeadmConfigMap(t *testing.T) { Namespace: metav1.NamespaceSystem, }, Data: map[string]string{ - clusterStatusKey: yaml.Raw(` + clusterStatusKey: utilyaml.Raw(` apiEndpoints: ip-10-0-0-1.ec2.internal: advertiseAddress: 10.0.0.1 @@ -763,14 +765,14 @@ func TestUpdateUpdateClusterStatusInKubeadmConfigMap(t *testing.T) { `), }, }}, - mutator: func(status *bootstrapv1.ClusterStatus) {}, + mutator: func(*bootstrapv1.ClusterStatus) {}, wantConfigMap: &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem, }, Data: map[string]string{ - clusterStatusKey: yaml.Raw(` + clusterStatusKey: utilyaml.Raw(` apiEndpoints: ip-10-0-0-1.ec2.internal: advertiseAddress: 10.0.0.1 @@ -790,55 +792,15 @@ func TestUpdateUpdateClusterStatusInKubeadmConfigMap(t *testing.T) { Namespace: metav1.NamespaceSystem, }, Data: map[string]string{ - clusterStatusKey: yaml.Raw(` - apiEndpoints: - ip-10-0-0-1.ec2.internal: - advertiseAddress: 10.0.0.1 - bindPort: 6443 - apiVersion: kubeadm.k8s.io/v1beta2 - kind: ClusterStatus - `), - }, - }}, - mutator: func(status *bootstrapv1.ClusterStatus) { - status.APIEndpoints["ip-10-0-0-2.ec2.internal"] = bootstrapv1.APIEndpoint{} - }, - wantConfigMap: &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: kubeadmConfigKey, - Namespace: metav1.NamespaceSystem, - }, - Data: map[string]string{ - clusterStatusKey: yaml.Raw(` + clusterStatusKey: utilyaml.Raw(` apiEndpoints: ip-10-0-0-1.ec2.internal: advertiseAddress: 10.0.0.1 bindPort: 6443 - ip-10-0-0-2.ec2.internal: {} apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterStatus `), }, - }, - }, - { - name: "converts kubeadm api version during mutation if required", - version: semver.MustParse("1.17.2"), - objs: []client.Object{&corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: kubeadmConfigKey, - Namespace: metav1.NamespaceSystem, - }, - Data: map[string]string{ - clusterStatusKey: yaml.Raw(` - apiEndpoints: - ip-10-0-0-1.ec2.internal: - advertiseAddress: 10.0.0.1 - bindPort: 6443 - apiVersion: kubeadm.k8s.io/v1beta1 - kind: ClusterStatus - `), - }, }}, mutator: func(status *bootstrapv1.ClusterStatus) { status.APIEndpoints["ip-10-0-0-2.ec2.internal"] = bootstrapv1.APIEndpoint{} @@ -849,7 +811,7 @@ func TestUpdateUpdateClusterStatusInKubeadmConfigMap(t *testing.T) { Namespace: metav1.NamespaceSystem, }, Data: map[string]string{ - clusterStatusKey: yaml.Raw(` + clusterStatusKey: utilyaml.Raw(` apiEndpoints: ip-10-0-0-1.ec2.internal: advertiseAddress: 10.0.0.1 @@ -898,8 +860,8 @@ func TestUpdateKubernetesVersionInKubeadmConfigMap(t *testing.T) { { name: "updates the config map and changes the kubeadm API version", version: semver.MustParse("1.17.2"), - clusterConfigurationData: yaml.Raw(` - apiVersion: kubeadm.k8s.io/v1beta1 + clusterConfigurationData: utilyaml.Raw(` + apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration kubernetesVersion: v1.16.1`), }, @@ -921,7 +883,8 @@ func TestUpdateKubernetesVersionInKubeadmConfigMap(t *testing.T) { w := &Workload{ Client: fakeClient, } - err := w.UpdateKubernetesVersionInKubeadmConfigMap(ctx, tt.version) + + err := w.UpdateClusterConfiguration(ctx, tt.version, w.UpdateKubernetesVersionInKubeadmConfigMap(tt.version)) g.Expect(err).ToNot(HaveOccurred()) var actualConfig corev1.ConfigMap @@ -944,7 +907,7 @@ func TestUpdateImageRepositoryInKubeadmConfigMap(t *testing.T) { }{ { name: "it should set the image repository", - 
clusterConfigurationData: yaml.Raw(` + clusterConfigurationData: utilyaml.Raw(` apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration`), newImageRepository: "example.com/k8s", @@ -952,7 +915,7 @@ func TestUpdateImageRepositoryInKubeadmConfigMap(t *testing.T) { }, { name: "it should preserve the existing image repository if the new value is empty", - clusterConfigurationData: yaml.Raw(` + clusterConfigurationData: utilyaml.Raw(` apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration imageRepository: foo.bar/baz.io`), @@ -977,7 +940,7 @@ func TestUpdateImageRepositoryInKubeadmConfigMap(t *testing.T) { w := &Workload{ Client: fakeClient, } - err := w.UpdateImageRepositoryInKubeadmConfigMap(ctx, tt.newImageRepository, semver.MustParse("1.19.1")) + err := w.UpdateClusterConfiguration(ctx, semver.MustParse("1.19.1"), w.UpdateImageRepositoryInKubeadmConfigMap(tt.newImageRepository)) g.Expect(err).ToNot(HaveOccurred()) var actualConfig corev1.ConfigMap @@ -1000,7 +963,7 @@ func TestUpdateApiServerInKubeadmConfigMap(t *testing.T) { }{ { name: "it should set the api server config", - clusterConfigurationData: yaml.Raw(` + clusterConfigurationData: utilyaml.Raw(` apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration `), @@ -1019,7 +982,7 @@ func TestUpdateApiServerInKubeadmConfigMap(t *testing.T) { }, }, }, - wantClusterConfiguration: yaml.Raw(` + wantClusterConfiguration: utilyaml.Raw(` apiServer: extraArgs: bar: baz @@ -1055,7 +1018,7 @@ func TestUpdateApiServerInKubeadmConfigMap(t *testing.T) { w := &Workload{ Client: fakeClient, } - err := w.UpdateAPIServerInKubeadmConfigMap(ctx, tt.newAPIServer, semver.MustParse("1.19.1")) + err := w.UpdateClusterConfiguration(ctx, semver.MustParse("1.19.1"), w.UpdateAPIServerInKubeadmConfigMap(tt.newAPIServer)) g.Expect(err).ToNot(HaveOccurred()) var actualConfig corev1.ConfigMap @@ -1078,7 +1041,7 @@ func TestUpdateControllerManagerInKubeadmConfigMap(t *testing.T) { }{ { name: "it should set the controller manager config", - clusterConfigurationData: yaml.Raw(` + clusterConfigurationData: utilyaml.Raw(` apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration `), @@ -1095,7 +1058,7 @@ func TestUpdateControllerManagerInKubeadmConfigMap(t *testing.T) { }, }, }, - wantClusterConfiguration: yaml.Raw(` + wantClusterConfiguration: utilyaml.Raw(` apiServer: {} apiVersion: kubeadm.k8s.io/v1beta2 controllerManager: @@ -1131,7 +1094,7 @@ func TestUpdateControllerManagerInKubeadmConfigMap(t *testing.T) { w := &Workload{ Client: fakeClient, } - err := w.UpdateControllerManagerInKubeadmConfigMap(ctx, tt.newControllerManager, semver.MustParse("1.19.1")) + err := w.UpdateClusterConfiguration(ctx, semver.MustParse("1.19.1"), w.UpdateControllerManagerInKubeadmConfigMap(tt.newControllerManager)) g.Expect(err).ToNot(HaveOccurred()) var actualConfig corev1.ConfigMap @@ -1154,7 +1117,7 @@ func TestUpdateSchedulerInKubeadmConfigMap(t *testing.T) { }{ { name: "it should set the scheduler config", - clusterConfigurationData: yaml.Raw(` + clusterConfigurationData: utilyaml.Raw(` apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration `), @@ -1171,7 +1134,7 @@ func TestUpdateSchedulerInKubeadmConfigMap(t *testing.T) { }, }, }, - wantClusterConfiguration: yaml.Raw(` + wantClusterConfiguration: utilyaml.Raw(` apiServer: {} apiVersion: kubeadm.k8s.io/v1beta2 controllerManager: {} @@ -1206,7 +1169,7 @@ func TestUpdateSchedulerInKubeadmConfigMap(t *testing.T) { w := &Workload{ Client: fakeClient, } - err := w.UpdateSchedulerInKubeadmConfigMap(ctx, 
tt.newScheduler, semver.MustParse("1.19.1")) + err := w.UpdateClusterConfiguration(ctx, semver.MustParse("1.19.1"), w.UpdateSchedulerInKubeadmConfigMap(tt.newScheduler)) g.Expect(err).ToNot(HaveOccurred()) var actualConfig corev1.ConfigMap @@ -1299,6 +1262,70 @@ func TestClusterStatus(t *testing.T) { } } +func TestUpdateFeatureGatesInKubeadmConfigMap(t *testing.T) { + tests := []struct { + name string + clusterConfigurationData string + newFeatureGates map[string]bool + wantFeatureGates map[string]bool + }{ + { + name: "it updates feature gates", + clusterConfigurationData: utilyaml.Raw(` + apiVersion: kubeadm.k8s.io/v1beta2 + kind: ClusterConfiguration`), + newFeatureGates: map[string]bool{"EtcdLearnerMode": true}, + wantFeatureGates: map[string]bool{"EtcdLearnerMode": true}, + }, + { + name: "it should override feature gates even if new value is nil", + clusterConfigurationData: utilyaml.Raw(` + apiVersion: kubeadm.k8s.io/v1beta2 + kind: ClusterConfiguration + featureGates: + EtcdLearnerMode: true + `), + newFeatureGates: nil, + wantFeatureGates: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfigKey, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + clusterConfigurationKey: tt.clusterConfigurationData, + }, + }).Build() + + w := &Workload{ + Client: fakeClient, + } + err := w.UpdateClusterConfiguration(ctx, semver.MustParse("1.19.1"), w.UpdateFeatureGatesInKubeadmConfigMap(tt.newFeatureGates)) + g.Expect(err).ToNot(HaveOccurred()) + + var actualConfig corev1.ConfigMap + g.Expect(w.Client.Get( + ctx, + client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, + &actualConfig, + )).To(Succeed()) + + actualConfiguration := bootstrapv1.ClusterConfiguration{} + err = yaml.Unmarshal([]byte(actualConfig.Data[clusterConfigurationKey]), &actualConfiguration) + if err != nil { + return + } + g.Expect(actualConfiguration.FeatureGates).Should(Equal(tt.wantFeatureGates)) + }) + } +} + func getProxyImageInfo(ctx context.Context, c client.Client) (string, error) { ds := &appsv1.DaemonSet{} diff --git a/controlplane/kubeadm/main.go b/controlplane/kubeadm/main.go index bc41b82db800..34c84c077927 100644 --- a/controlplane/kubeadm/main.go +++ b/controlplane/kubeadm/main.go @@ -21,18 +21,17 @@ import ( "context" "flag" "fmt" - "math/rand" - "net/http" - _ "net/http/pprof" "os" + goruntime "runtime" "time" - // +kubebuilder:scaffold:imports "github.com/spf13/pflag" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/selection" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/leaderelection/resourcelock" cliflag "k8s.io/component-base/cli/flag" @@ -41,68 +40,74 @@ import ( _ "k8s.io/component-base/logs/json/register" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/webhook" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/remote" - controlplanev1alpha3 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3" - 
controlplanev1alpha4 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha4" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" kubeadmcontrolplanecontrollers "sigs.k8s.io/cluster-api/controlplane/kubeadm/controllers" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd" kcpwebhooks "sigs.k8s.io/cluster-api/controlplane/kubeadm/webhooks" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/feature" + controlplanev1alpha3 "sigs.k8s.io/cluster-api/internal/apis/controlplane/kubeadm/v1alpha3" + controlplanev1alpha4 "sigs.k8s.io/cluster-api/internal/apis/controlplane/kubeadm/v1alpha4" "sigs.k8s.io/cluster-api/util/flags" "sigs.k8s.io/cluster-api/version" ) var ( - scheme = runtime.NewScheme() - setupLog = ctrl.Log.WithName("setup") + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") + controllerName = "cluster-api-kubeadm-control-plane-manager" + + // flags. + enableLeaderElection bool + leaderElectionLeaseDuration time.Duration + leaderElectionRenewDeadline time.Duration + leaderElectionRetryPeriod time.Duration + watchFilterValue string + watchNamespace string + profilerAddress string + enableContentionProfiling bool + syncPeriod time.Duration + restConfigQPS float32 + restConfigBurst int + webhookPort int + webhookCertDir string + webhookCertName string + webhookKeyName string + healthAddr string + tlsOptions = flags.TLSOptions{} + diagnosticsOptions = flags.DiagnosticsOptions{} + logOptions = logs.NewOptions() + // KCP specific flags. + kubeadmControlPlaneConcurrency int + clusterCacheTrackerConcurrency int + etcdDialTimeout time.Duration + etcdCallTimeout time.Duration + useDeprecatedInfraMachineNaming bool ) func init() { - klog.InitFlags(nil) - _ = clientgoscheme.AddToScheme(scheme) _ = clusterv1.AddToScheme(scheme) + _ = expv1.AddToScheme(scheme) _ = controlplanev1alpha3.AddToScheme(scheme) _ = controlplanev1alpha4.AddToScheme(scheme) _ = controlplanev1.AddToScheme(scheme) _ = bootstrapv1.AddToScheme(scheme) _ = apiextensionsv1.AddToScheme(scheme) - // +kubebuilder:scaffold:scheme } -var ( - metricsBindAddr string - enableLeaderElection bool - leaderElectionLeaseDuration time.Duration - leaderElectionRenewDeadline time.Duration - leaderElectionRetryPeriod time.Duration - watchFilterValue string - watchNamespace string - profilerAddress string - kubeadmControlPlaneConcurrency int - syncPeriod time.Duration - webhookPort int - webhookCertDir string - healthAddr string - etcdDialTimeout time.Duration - etcdCallTimeout time.Duration - tlsOptions = flags.TLSOptions{} - logOptions = logs.NewOptions() -) - // InitFlags initializes the flags. func InitFlags(fs *pflag.FlagSet) { logsv1.AddFlags(logOptions, fs) - fs.StringVar(&metricsBindAddr, "metrics-bind-addr", "localhost:8080", - "The address the metric endpoint binds to.") - fs.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.") @@ -118,23 +123,41 @@ func InitFlags(fs *pflag.FlagSet) { fs.StringVar(&watchNamespace, "namespace", "", "Namespace that the controller watches to reconcile cluster-api objects. If unspecified, the controller watches for cluster-api objects across all namespaces.") + fs.StringVar(&watchFilterValue, "watch-filter", "", + fmt.Sprintf("Label value that the controller watches to reconcile cluster-api objects. Label key is always %s. 
If unspecified, the controller watches for all cluster-api objects.", clusterv1.WatchLabel)) + fs.StringVar(&profilerAddress, "profiler-address", "", "Bind address to expose the pprof profiler (e.g. localhost:6060)") + fs.BoolVar(&enableContentionProfiling, "contention-profiling", false, + "Enable block profiling") + fs.IntVar(&kubeadmControlPlaneConcurrency, "kubeadmcontrolplane-concurrency", 10, "Number of kubeadm control planes to process simultaneously") + fs.IntVar(&clusterCacheTrackerConcurrency, "clustercachetracker-concurrency", 10, + "Number of clusters to process simultaneously") + fs.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, "The minimum interval at which watched resources are reconciled (e.g. 15m)") - fs.StringVar(&watchFilterValue, "watch-filter", "", - fmt.Sprintf("Label value that the controller watches to reconcile cluster-api objects. Label key is always %s. If unspecified, the controller watches for all cluster-api objects.", clusterv1.WatchLabel)) + fs.Float32Var(&restConfigQPS, "kube-api-qps", 20, + "Maximum queries per second from the controller client to the Kubernetes API server. Defaults to 20") + + fs.IntVar(&restConfigBurst, "kube-api-burst", 30, + "Maximum number of queries that should be allowed in one burst from the controller client to the Kubernetes API server. Default 30") fs.IntVar(&webhookPort, "webhook-port", 9443, "Webhook Server port") fs.StringVar(&webhookCertDir, "webhook-cert-dir", "/tmp/k8s-webhook-server/serving-certs/", - "Webhook cert dir, only used when webhook-port is specified.") + "Webhook cert dir.") + + fs.StringVar(&webhookCertName, "webhook-cert-name", "tls.crt", + "Webhook cert name.") + + fs.StringVar(&webhookKeyName, "webhook-key-name", "tls.key", + "Webhook key name.") fs.StringVar(&healthAddr, "health-addr", ":9440", "The address the health endpoint binds to.") @@ -145,16 +168,29 @@ func InitFlags(fs *pflag.FlagSet) { fs.DurationVar(&etcdCallTimeout, "etcd-call-timeout-duration", etcd.DefaultCallTimeout, "Duration that the etcd client waits at most for read and write operations to etcd.") + fs.BoolVar(&useDeprecatedInfraMachineNaming, "use-deprecated-infra-machine-naming", false, + "Use the deprecated naming convention for infra machines where they are named after the InfraMachineTemplate.") + _ = fs.MarkDeprecated("use-deprecated-infra-machine-naming", "This flag will be removed in v1.9.") + + flags.AddDiagnosticsOptions(fs, &diagnosticsOptions) flags.AddTLSOptions(fs, &tlsOptions) feature.MutableGates.AddFlag(fs) } -func main() { - rand.Seed(time.Now().UnixNano()) +// Add RBAC for the authorized diagnostics endpoint. +// +kubebuilder:rbac:groups=authentication.k8s.io,resources=tokenreviews,verbs=create +// +kubebuilder:rbac:groups=authorization.k8s.io,resources=subjectaccessreviews,verbs=create + +func main() { InitFlags(pflag.CommandLine) pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc) pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + // Set log level 2 as default. + if err := pflag.CommandLine.Set("v", "2"); err != nil { + setupLog.Error(err, "failed to set default log level") + os.Exit(1) + } pflag.Parse() if err := logsv1.ValidateAndApply(logOptions, nil); err != nil { @@ -165,18 +201,10 @@ func main() { // klog.Background will automatically use the right logger. 
ctrl.SetLogger(klog.Background()) - if profilerAddress != "" { - setupLog.Info(fmt.Sprintf("Profiler listening for requests at %s", profilerAddress)) - go func() { - srv := http.Server{Addr: profilerAddress, ReadHeaderTimeout: 2 * time.Second} - if err := srv.ListenAndServe(); err != nil { - setupLog.Error(err, "problem running profiler server") - } - }() - } - restConfig := ctrl.GetConfigOrDie() - restConfig.UserAgent = remote.DefaultClusterAPIUserAgent("cluster-api-kubeadm-control-plane-manager") + restConfig.QPS = restConfigQPS + restConfig.Burst = restConfigBurst + restConfig.UserAgent = remote.DefaultClusterAPIUserAgent(controllerName) tlsOptionOverrides, err := flags.GetTLSOptionOverrideFuncs(tlsOptions) if err != nil { @@ -184,26 +212,70 @@ func main() { os.Exit(1) } - mgr, err := ctrl.NewManager(restConfig, ctrl.Options{ + diagnosticsOpts := flags.GetDiagnosticsOptions(diagnosticsOptions) + + var watchNamespaces map[string]cache.Config + if watchNamespace != "" { + watchNamespaces = map[string]cache.Config{ + watchNamespace: {}, + } + } + + if enableContentionProfiling { + goruntime.SetBlockProfileRate(1) + } + + req, _ := labels.NewRequirement(clusterv1.ClusterNameLabel, selection.Exists, nil) + clusterSecretCacheSelector := labels.NewSelector().Add(*req) + + ctrlOptions := ctrl.Options{ Scheme: scheme, - MetricsBindAddress: metricsBindAddr, LeaderElection: enableLeaderElection, LeaderElectionID: "kubeadm-control-plane-manager-leader-election-capi", LeaseDuration: &leaderElectionLeaseDuration, RenewDeadline: &leaderElectionRenewDeadline, RetryPeriod: &leaderElectionRetryPeriod, LeaderElectionResourceLock: resourcelock.LeasesResourceLock, - Namespace: watchNamespace, - SyncPeriod: &syncPeriod, - ClientDisableCacheFor: []client.Object{ - &corev1.ConfigMap{}, - &corev1.Secret{}, + HealthProbeBindAddress: healthAddr, + PprofBindAddress: profilerAddress, + Metrics: diagnosticsOpts, + Cache: cache.Options{ + DefaultNamespaces: watchNamespaces, + SyncPeriod: &syncPeriod, + ByObject: map[client.Object]cache.ByObject{ + // Note: Only Secrets with the cluster name label are cached. + // The default client of the manager won't use the cache for secrets at all (see Client.Cache.DisableFor). + // The cached secrets will only be used by the secretCachingClient we create below. + &corev1.Secret{}: { + Label: clusterSecretCacheSelector, + }, + }, }, - Port: webhookPort, - HealthProbeBindAddress: healthAddr, - CertDir: webhookCertDir, - TLSOpts: tlsOptionOverrides, - }) + Client: client.Options{ + Cache: &client.CacheOptions{ + DisableFor: []client.Object{ + &corev1.ConfigMap{}, + &corev1.Secret{}, + }, + // This config ensures that the default client caches Unstructured objects. + // KCP is only using Unstructured to retrieve InfraMachines and InfraMachineTemplates. + // As the cache should be used in those cases, caching is configured globally instead of + // creating a separate client that caches Unstructured. 
+ Unstructured: true, + }, + }, + WebhookServer: webhook.NewServer( + webhook.Options{ + Port: webhookPort, + CertDir: webhookCertDir, + CertName: webhookCertName, + KeyName: webhookKeyName, + TLSOpts: tlsOptionOverrides, + }, + ), + } + + mgr, err := ctrl.NewManager(restConfig, ctrlOptions) if err != nil { setupLog.Error(err, "unable to start manager") os.Exit(1) @@ -216,8 +288,7 @@ func main() { setupReconcilers(ctx, mgr) setupWebhooks(mgr) - // +kubebuilder:scaffold:builder - setupLog.Info("starting manager", "version", version.Get().String()) + setupLog.Info("Starting manager", "version", version.Get().String()) if err := mgr.Start(ctx); err != nil { setupLog.Error(err, "problem running manager") os.Exit(1) @@ -237,12 +308,23 @@ func setupChecks(mgr ctrl.Manager) { } func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { + secretCachingClient, err := client.New(mgr.GetConfig(), client.Options{ + HTTPClient: mgr.GetHTTPClient(), + Cache: &client.CacheOptions{ + Reader: mgr.GetCache(), + }, + }) + if err != nil { + setupLog.Error(err, "unable to create secret caching client") + os.Exit(1) + } + // Set up a ClusterCacheTracker to provide to controllers // requiring a connection to a remote cluster - log := ctrl.Log.WithName("remote").WithName("ClusterCacheTracker") tracker, err := remote.NewClusterCacheTracker(mgr, remote.ClusterCacheTrackerOptions{ - Log: &log, - Indexes: remote.DefaultIndexes, + SecretCachingClient: secretCachingClient, + ControllerName: controllerName, + Log: &ctrl.Log, ClientUncachedObjects: []client.Object{ &corev1.ConfigMap{}, &corev1.Secret{}, @@ -259,18 +341,19 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { Client: mgr.GetClient(), Tracker: tracker, WatchFilterValue: watchFilterValue, - }).SetupWithManager(ctx, mgr, concurrency(kubeadmControlPlaneConcurrency)); err != nil { + }).SetupWithManager(ctx, mgr, concurrency(clusterCacheTrackerConcurrency)); err != nil { setupLog.Error(err, "unable to create controller", "controller", "ClusterCacheReconciler") os.Exit(1) } if err := (&kubeadmcontrolplanecontrollers.KubeadmControlPlaneReconciler{ - Client: mgr.GetClient(), - APIReader: mgr.GetAPIReader(), - Tracker: tracker, - WatchFilterValue: watchFilterValue, - EtcdDialTimeout: etcdDialTimeout, - EtcdCallTimeout: etcdCallTimeout, + Client: mgr.GetClient(), + SecretCachingClient: secretCachingClient, + Tracker: tracker, + WatchFilterValue: watchFilterValue, + EtcdDialTimeout: etcdDialTimeout, + EtcdCallTimeout: etcdCallTimeout, + DeprecatedInfraMachineNaming: useDeprecatedInfraMachineNaming, }).SetupWithManager(ctx, mgr, concurrency(kubeadmControlPlaneConcurrency)); err != nil { setupLog.Error(err, "unable to create controller", "controller", "KubeadmControlPlane") os.Exit(1) @@ -278,7 +361,7 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { } func setupWebhooks(mgr ctrl.Manager) { - if err := (&controlplanev1.KubeadmControlPlane{}).SetupWebhookWithManager(mgr); err != nil { + if err := (&kcpwebhooks.KubeadmControlPlane{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "KubeadmControlPlane") os.Exit(1) } @@ -290,7 +373,7 @@ func setupWebhooks(mgr ctrl.Manager) { os.Exit(1) } - if err := (&controlplanev1.KubeadmControlPlaneTemplate{}).SetupWebhookWithManager(mgr); err != nil { + if err := (&kcpwebhooks.KubeadmControlPlaneTemplate{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "KubeadmControlPlaneTemplate") os.Exit(1) } 
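// [Illustrative sketch added in review; not part of the diff.] The rewritten main.go splits
// reads across two clients: the manager's default client never caches Secrets or ConfigMaps
// (see Client.Cache.DisableFor above), while secretCachingClient reads through the manager
// cache, which only holds Secrets matching clusterSecretCacheSelector (the cluster-name
// label). Under those assumptions, usage inside a reconciler reduces to something like this
// (ns, secretName, and cmName are placeholder values):
//
//	// Cached read: the Secret carries the cluster-name label, so the
//	// label-filtered informer can serve it without an API server round trip.
//	var s corev1.Secret
//	if err := secretCachingClient.Get(ctx, client.ObjectKey{Namespace: ns, Name: secretName}, &s); err != nil {
//		return err
//	}
//
//	// Uncached read: ConfigMaps are excluded from the cache, so this always
//	// goes to the API server.
//	var cm corev1.ConfigMap
//	if err := mgr.GetClient().Get(ctx, client.ObjectKey{Namespace: ns, Name: cmName}, &cm); err != nil {
//		return err
//	}
//
// The intent is that hot-path Secret reads (for example cluster CA or kubeconfig Secrets)
// stay cheap without having to cache every Secret in the management cluster.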
diff --git a/controlplane/kubeadm/webhooks/alias.go b/controlplane/kubeadm/webhooks/alias.go index 8890a6d465ad..ddc3e7441041 100644 --- a/controlplane/kubeadm/webhooks/alias.go +++ b/controlplane/kubeadm/webhooks/alias.go @@ -34,3 +34,19 @@ func (v *ScaleValidator) SetupWebhookWithManager(mgr ctrl.Manager) error { Client: v.Client, }).SetupWebhookWithManager(mgr) } + +// KubeadmControlPlane implements a validating and defaulting webhook for KubeadmControlPlane. +type KubeadmControlPlane struct{} + +// SetupWebhookWithManager sets up KubeadmControlPlane webhooks. +func (webhook *KubeadmControlPlane) SetupWebhookWithManager(mgr ctrl.Manager) error { + return (&webhooks.KubeadmControlPlane{}).SetupWebhookWithManager(mgr) +} + +// KubeadmControlPlaneTemplate implements a validating and defaulting webhook for KubeadmControlPlaneTemplate. +type KubeadmControlPlaneTemplate struct{} + +// SetupWebhookWithManager sets up KubeadmControlPlaneTemplate webhooks. +func (webhook *KubeadmControlPlaneTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error { + return (&webhooks.KubeadmControlPlaneTemplate{}).SetupWebhookWithManager(mgr) +} diff --git a/docs/Dockerfile b/docs/Dockerfile deleted file mode 100644 index a606e65866d3..000000000000 --- a/docs/Dockerfile +++ /dev/null @@ -1,25 +0,0 @@ -# syntax=docker/dockerfile:1.4 - -# Copyright 2019 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# To generate diagrams run 'make diagrams' in the docs/ folder. -FROM maven:3-jdk-8 - -RUN apt-get update && apt-get install -y --no-install-recommends graphviz=2.42.2-5 fonts-symbola=2.60-1.1 fonts-wqy-zenhei=0.9.45-8 && rm -rf /var/lib/apt/lists/* -RUN wget -nv -O /plantuml.jar https://github.com/plantuml/plantuml/releases/download/v1.2022.6/plantuml-1.2022.6.jar - -# By default, java writes a 'hsperfdata_' directory in the work dir. -# This directory is not needed; to ensure it is not written, we set `-XX:-UsePerfData` -ENTRYPOINT [ "java", "-XX:-UsePerfData", "-jar", "/plantuml.jar" ] diff --git a/docs/Makefile b/docs/Makefile deleted file mode 100644 index 16985daf1205..000000000000 --- a/docs/Makefile +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2019 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -SHELL:=/usr/bin/env bash - -.DEFAULT_GOAL:=help - -DIAGRAM_SRCS := $(call rwildcard,.,*.md,*.plantuml) - -# Hosts running SELinux need :z added to volume mounts -SELINUX_ENABLED := $(shell cat /sys/fs/selinux/enforce 2> /dev/null || echo 0) - -ifeq ($(SELINUX_ENABLED),1) - DOCKER_VOL_OPTS?=:z -endif - -##@ PlantUML diagrams - -.PHONY: diagrams -diagrams: plantuml-builder diagrams-book diagrams-proposals ## Make all diagrams - -.PHONY: diagrams-book -diagrams-book: plantuml-builder ## Make all book diagrams - -docker run -u $(UID):$(GID) -v $(abspath .):/docs$(DOCKER_VOL_OPTS) plantuml-builder /docs/book/**/*.md - -docker run -u $(UID):$(GID) -v $(abspath .):/docs$(DOCKER_VOL_OPTS) plantuml-builder /docs/book/**/*.plantuml - -.PHONY: diagrams-proposals -diagrams-proposals: plantuml-builder ## Make all proposals diagrams - -docker run -u $(UID):$(GID) -v $(abspath .):/docs$(DOCKER_VOL_OPTS) plantuml-builder /docs/proposals/**/*.md - -docker run -u $(UID):$(GID) -v $(abspath .):/docs$(DOCKER_VOL_OPTS) plantuml-builder /docs/proposals/**/*.plantuml - -.PHONY: plantuml-builder -plantuml-builder: Dockerfile ## Make diagram build container - docker build -f Dockerfile -t "plantuml-builder" . - -##@ general - -help: ## Display this help - @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z0-9_-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) diff --git a/docs/book/Makefile b/docs/book/Makefile index a6d0d75c5675..e5e0ca1b9076 100644 --- a/docs/book/Makefile +++ b/docs/book/Makefile @@ -17,7 +17,7 @@ ROOT_DIR:=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) TOOLS_DIR := $(realpath ../../hack/tools) TOOLS_BIN_DIR := $(TOOLS_DIR)/bin BIN_DIR := bin -MDBOOK_INSTALL := $(realpath ../../scripts/ci-install-mdbook.sh) +MDBOOK_INSTALL := $(realpath ../../scripts/install-mdbook.sh) export PATH := $(TOOLS_BIN_DIR):$(PATH) diff --git a/docs/book/book.toml b/docs/book/book.toml index 4265e8afcc4d..0f07025c6a7a 100644 --- a/docs/book/book.toml +++ b/docs/book/book.toml @@ -12,6 +12,13 @@ additional-css = ["theme/css/custom.css"] [output.html.redirect] "/tasks/upgrade.html" = "/tasks/upgrading-cluster-api-versions.html" +"/agenda.html" = "/agenda/2024.html" +"/agenda/2024.html" = "https://docs.google.com/document/d/1GgFbaYs-H6J5HSQ6a7n4aKpk0nDLE2hgG2NSOM9YIRw" +"/agenda/2023.html" = "https://docs.google.com/document/d/1Ov_rbxOV_O4HeAuwTev1lSwkiLJoBc_HDS1NyI93AcY" +"/agenda/2022.html" = "https://docs.google.com/document/d/1Bk_1581mriNUlwOFpt7SAC3WnaM_gBqLy8aWoiWG-9A" +"/agenda/2021.html" = "" +"/agenda/2020.html" = "https://docs.google.com/document/d/1r8ydd2g2NFE4giD9MqLspv0CJcjzC17e4UUQyyNLx2M" +"/agenda/2019.html" = "https://docs.google.com/document/d/1Ys-DOR5UsgbMEeciuG0HOgDQc8kZsaWIWJeKJ1-UfbY" [preprocessor.tabulate] command = "./util-tabulate.sh" diff --git a/docs/book/src/SUMMARY.md b/docs/book/src/SUMMARY.md index 9a285e34bc9b..24c09d956358 100644 --- a/docs/book/src/SUMMARY.md +++ b/docs/book/src/SUMMARY.md @@ -2,15 +2,17 @@ [Introduction](./introduction.md) [Quick Start](./user/quick-start.md) +[Quick Start Operator](./user/quick-start-operator.md) [Concepts](./user/concepts.md) -[Personas](./user/personas.md) +[Manifesto](./user/manifesto.md) - [Tasks](./tasks/index.md) - [Certificate Management](./tasks/certs/index.md) - [Using Custom Certificates](./tasks/certs/using-custom-certificates.md) - [Generating a Kubeconfig](./tasks/certs/generate-kubeconfig.md) - [Auto Rotate 
Certificates in KCP](./tasks/certs/auto-rotate-certificates-in-kcp.md) - [Bootstrap](./tasks/bootstrap/index.md) - - [Kubeadm based bootstrap](./tasks/bootstrap/kubeadm-bootstrap.md) + - [Kubeadm based bootstrap](./tasks/bootstrap/kubeadm-bootstrap/index.md) + - [Kubelet configuration](./tasks/bootstrap/kubeadm-bootstrap/kubelet-config.md) - [MicroK8s based bootstrap](./tasks/bootstrap/microk8s-bootstrap.md) - [Upgrading management and workload clusters](./tasks/upgrading-clusters.md) - [External etcd](./tasks/external-etcd.md) @@ -20,12 +22,14 @@ - [Kubeadm based control plane management](./tasks/control-plane/kubeadm-control-plane.md) - [MicroK8s based control plane management](./tasks/control-plane/microk8s-control-plane.md) - [Updating Machine Infrastructure and Bootstrap Templates](tasks/updating-machine-templates.md) + - [Workload bootstrap using GitOps](tasks/workload-bootstrap-gitops.md) - [Automated Machine management](./tasks/automated-machine-management/index.md) - [Scaling](./tasks/automated-machine-management/scaling.md) - [Autoscaling](./tasks/automated-machine-management/autoscaling.md) - [Healthchecking](./tasks/automated-machine-management/healthchecking.md) - [Experimental Features](./tasks/experimental-features/experimental-features.md) - [MachinePools](./tasks/experimental-features/machine-pools.md) + - [MachineSetPreflightChecks](./tasks/experimental-features/machineset-preflight-checks.md) - [ClusterResourceSet](./tasks/experimental-features/cluster-resource-set.md) - [ClusterClass](./tasks/experimental-features/cluster-class/index.md) - [Writing a ClusterClass](./tasks/experimental-features/cluster-class/write-clusterclass.md) @@ -38,6 +42,8 @@ - [Deploying Runtime Extensions](./tasks/experimental-features/runtime-sdk/deploy-runtime-extension.md) - [Ignition Bootstrap configuration](./tasks/experimental-features/ignition.md) - [Running multiple providers](./tasks/multiple-providers.md) + - [Verification of Container Images](./tasks/verify-container-images.md) + - [Diagnostics](./tasks/diagnostics.md) - [Security Guidelines](./security/index.md) - [Pod Security Standards](./security/pod-security-standards.md) - [clusterctl CLI](./clusterctl/overview.md) @@ -58,6 +64,7 @@ - [clusterctl Configuration](clusterctl/configuration.md) - [clusterctl Provider Contract](clusterctl/provider-contract.md) - [clusterctl for Developers](clusterctl/developers.md) + - [clusterctl Extensions with Plugins](clusterctl/plugins.md) - [Developer Guide](./developer/guide.md) - [Repository Layout](./developer/repository-layout.md) - [Rapid iterative development with Tilt](./developer/tilt.md) @@ -78,14 +85,19 @@ - [Metadata propagation](./developer/architecture/controllers/metadata-propagation.md) - [Multi-tenancy](./developer/architecture/controllers/multi-tenancy.md) - [Support multiple instances](./developer/architecture/controllers/support-multiple-instances.md) + - [Tuning controllers](./developer/architecture/controllers/tuning.md) - [Provider Implementers](./developer/providers/implementers.md) - [Version migration](./developer/providers/version-migration.md) - - [v0.3 to v0.4](./developer/providers/v0.3-to-v0.4.md) - - [v0.4 to v1.0](./developer/providers/v0.4-to-v1.0.md) - - [v1.0 to v1.1](./developer/providers/v1.0-to-v1.1.md) - - [v1.1 to v1.2](./developer/providers/v1.1-to-v1.2.md) - - [v1.2 to v1.3](./developer/providers/v1.2-to-v1.3.md) - - [v1.3 to v1.4](./developer/providers/v1.3-to-v1.4.md) + - [v0.3 to v0.4](./developer/providers/migrations/v0.3-to-v0.4.md) + - [v0.4 to 
v1.0](./developer/providers/migrations/v0.4-to-v1.0.md) + - [v1.0 to v1.1](./developer/providers/migrations/v1.0-to-v1.1.md) + - [v1.1 to v1.2](./developer/providers/migrations/v1.1-to-v1.2.md) + - [v1.2 to v1.3](./developer/providers/migrations/v1.2-to-v1.3.md) + - [v1.3 to v1.4](./developer/providers/migrations/v1.3-to-v1.4.md) + - [v1.4 to v1.5](./developer/providers/migrations/v1.4-to-v1.5.md) + - [v1.5 to v1.6](./developer/providers/migrations/v1.5-to-v1.6.md) + - [v1.6 to v1.7](./developer/providers/migrations/v1.6-to-v1.7.md) + - [v1.7 to v1.8](./developer/providers/migrations/v1.7-to-v1.8.md) - [Provider contracts](./developer/providers/contracts.md) - [Cluster Infrastructure](./developer/providers/cluster-infrastructure.md) - [Machine Infrastructure](./developer/providers/machine-infrastructure.md) @@ -110,5 +122,5 @@ - [Jobs](./reference/jobs.md) - [Code Review in Cluster API](./REVIEWING.md) - [Version Support](./reference/versions.md) - - [Roadmap](./roadmap.md) - [Supported Labels and Annotations](./reference/labels_and_annotations.md) + - [Owner References](./reference/owner_references.md) diff --git a/docs/book/src/clusterctl/commands/additional-commands.md b/docs/book/src/clusterctl/commands/additional-commands.md index 278d8d9a1977..f7b251964b79 100644 --- a/docs/book/src/clusterctl/commands/additional-commands.md +++ b/docs/book/src/clusterctl/commands/additional-commands.md @@ -3,7 +3,7 @@ Display the list of providers and their repository configurations. clusterctl ships with a list of known providers; if necessary, edit -$HOME/.cluster-api/clusterctl.yaml file to add a new provider or to customize existing ones. +$XDG_CONFIG_HOME/cluster-api/clusterctl.yaml file to add a new provider or to customize existing ones. # clusterctl help diff --git a/docs/book/src/clusterctl/commands/alpha-topology-plan.md b/docs/book/src/clusterctl/commands/alpha-topology-plan.md index 3f95b01f4e46..f912ad9285d2 100644 --- a/docs/book/src/clusterctl/commands/alpha-topology-plan.md +++ b/docs/book/src/clusterctl/commands/alpha-topology-plan.md @@ -1,5 +1,13 @@ # clusterctl alpha topology plan + + The `clusterctl alpha topology plan` command can be used to get a plan of how a Cluster topology evolves given file(s) containing resources to be applied to a Cluster. @@ -127,22 +135,12 @@ spec: nodeDrainTimeout: 1s kubeadmConfigSpec: clusterConfiguration: - controllerManager: - extraArgs: { enable-hostpath-provisioner: 'true' } apiServer: certSANs: [ localhost, 127.0.0.1 ] initConfiguration: - nodeRegistration: - criSocket: unix:///var/run/containerd/containerd.sock - kubeletExtraArgs: - cgroup-driver: cgroupfs - eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. joinConfiguration: - nodeRegistration: - criSocket: unix:///var/run/containerd/containerd.sock - kubeletExtraArgs: - cgroup-driver: cgroupfs - eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
--- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: DockerMachineTemplate @@ -174,10 +172,7 @@ spec: template: spec: joinConfiguration: - nodeRegistration: - kubeletExtraArgs: - cgroup-driver: cgroupfs - eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. ```
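For orientation, a hedged sketch of how an input file like the one above is typically fed to the command (file and directory names here are placeholders):

```bash
# Hypothetical file/directory names; -f points at the input resources,
# -o writes the computed topology plan to a local folder.
clusterctl alpha topology plan -f my-input.yaml -o output/
```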
@@ -485,4 +480,3 @@ If only one cluster is affected or if a Cluster is in the input it defaults as t Namespace used for objects with missing namespaces in the input. If not provided, the namespace defined in kubeconfig is used. If a kubeconfig is not available the value `default` is used. - diff --git a/docs/book/src/clusterctl/commands/describe-cluster.md b/docs/book/src/clusterctl/commands/describe-cluster.md index d90348f9c948..6d89914f21b0 100644 --- a/docs/book/src/clusterctl/commands/describe-cluster.md +++ b/docs/book/src/clusterctl/commands/describe-cluster.md @@ -26,7 +26,7 @@ bootstrap object linked to a machine, unless their state differs from the machin By default, the visualization generated by `clusterctl describe cluster` hides details for the sake of simplicity and shortness. However, if required, the user can ask for showing all the detail: -By using the `--disable-grouping` flag, the user can force the visualization to show all the machines +By using `--grouping=false`, the user can force the visualization to show all the machines on separated lines, no matter if they have the same state or not: ![](../../images/describe-cluster-disable-grouping.png) diff --git a/docs/book/src/clusterctl/commands/generate-cluster.md b/docs/book/src/clusterctl/commands/generate-cluster.md index 7640d71f4e41..fdbba262d266 100644 --- a/docs/book/src/clusterctl/commands/generate-cluster.md +++ b/docs/book/src/clusterctl/commands/generate-cluster.md @@ -5,7 +5,7 @@ The `clusterctl generate cluster` command returns a YAML template for creating a For example ```bash -clusterctl generate cluster my-cluster --kubernetes-version v1.16.3 --control-plane-machine-count=3 --worker-machine-count=3 > my-cluster.yaml +clusterctl generate cluster my-cluster --kubernetes-version v1.28.0 --control-plane-machine-count=3 --worker-machine-count=3 > my-cluster.yaml ``` Generates a YAML file named `my-cluster.yaml` with a predefined list of Cluster API objects; Cluster, Machines, @@ -29,14 +29,14 @@ In case there is more than one infrastructure provider, the following syntax can provider to use for the workload cluster: ```bash -clusterctl generate cluster my-cluster --kubernetes-version v1.16.3 \ +clusterctl generate cluster my-cluster --kubernetes-version v1.28.0 \ --infrastructure aws > my-cluster.yaml ``` or ```bash -clusterctl generate cluster my-cluster --kubernetes-version v1.16.3 \ +clusterctl generate cluster my-cluster --kubernetes-version v1.28.0 \ --infrastructure aws:v0.4.1 > my-cluster.yaml ``` @@ -46,7 +46,7 @@ The infrastructure provider authors can provide different types of cluster templ to specify which flavor to use; e.g. ```bash -clusterctl generate cluster my-cluster --kubernetes-version v1.16.3 \ +clusterctl generate cluster my-cluster --kubernetes-version v1.28.0 \ --flavor high-availability > my-cluster.yaml ``` @@ -62,7 +62,7 @@ for cluster templates can be used as well: Use the `--from-config-map` flag to read cluster templates stored in a Kubernetes ConfigMap; e.g. ```bash -clusterctl generate cluster my-cluster --kubernetes-version v1.16.3 \ +clusterctl generate cluster my-cluster --kubernetes-version v1.28.0 \ --from-config-map my-templates > my-cluster.yaml ``` @@ -75,28 +75,28 @@ Use the `--from` flag to read cluster templates stored in a GitHub repository, r or from the standard input; e.g. 
```bash -clusterctl generate cluster my-cluster --kubernetes-version v1.16.3 \ +clusterctl generate cluster my-cluster --kubernetes-version v1.28.0 \ --from https://github.com/my-org/my-repository/blob/main/my-template.yaml > my-cluster.yaml ``` or ```bash -clusterctl generate cluster my-cluster --kubernetes-version v1.16.3 \ +clusterctl generate cluster my-cluster --kubernetes-version v1.28.0 \ --from https://foo.bar/my-template.yaml > my-cluster.yaml ``` or ```bash -clusterctl generate cluster my-cluster --kubernetes-version v1.16.3 \ +clusterctl generate cluster my-cluster --kubernetes-version v1.28.0 \ --from ~/my-template.yaml > my-cluster.yaml ``` or ```bash -cat ~/my-template.yaml | clusterctl generate cluster my-cluster --kubernetes-version v1.16.3 \ +cat ~/my-template.yaml | clusterctl generate cluster my-cluster --kubernetes-version v1.28.0 \ --from - > my-cluster.yaml ``` diff --git a/docs/book/src/clusterctl/commands/init.md b/docs/book/src/clusterctl/commands/init.md index 9acf8c0a52e6..07ec8f89ae2d 100644 --- a/docs/book/src/clusterctl/commands/init.md +++ b/docs/book/src/clusterctl/commands/init.md @@ -19,6 +19,10 @@ You can use the `clusterctl config repositories` command to get a list of suppor If the provider of your choice is missing, you can customize the list of supported providers by using the [clusterctl configuration](../configuration.md) file. +Important! The Cluster API project supports ecosystem growth and extensibility. The `clusterctl` CLI carries a list of +predefined providers sponsored by SIG Cluster Lifecycle, and out-of-organization third party open-source repositories. +Each repository is the responsibility of the respective maintainers, including their quality standards and support. + #### Automatically installed providers @@ -127,10 +131,10 @@ See [clusterctl configuration](../configuration.md) for more info about provider
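As a quick illustration of the flow described above (the provider choice is only an example):

```bash
# List the predefined provider repositories known to clusterctl,
# then initialize a management cluster with a chosen infrastructure provider.
clusterctl config repositories
clusterctl init --infrastructure docker
```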

Is it possible to override files read from a provider repository?

If, for any reason, the user wants to replace the assets available on a provider repository with a locally available asset,
-the user is required to save the file under `$HOME/.cluster-api/overrides///`.
+the user is required to save the file under `$XDG_CONFIG_HOME/cluster-api/overrides///`.

```bash
-$HOME/.cluster-api/overrides/infrastructure-aws/v0.5.2/infrastructure-components.yaml
+$XDG_CONFIG_HOME/cluster-api/overrides/infrastructure-aws/v0.5.2/infrastructure-components.yaml
```

@@ -190,7 +194,7 @@ If this happens, there are no guarantees about the proper functioning of `cluste

Cluster API providers require a cert-manager version supporting the `cert-manager.io/v1` API to be installed in the cluster.

While doing init, clusterctl checks if there is a version of cert-manager already installed. If not, clusterctl will
-install a default version (currently cert-manager v1.11.0). See [clusterctl configuration](../configuration.md) for
+install a default version (currently cert-manager v1.14.5). See [clusterctl configuration](../configuration.md) for
available options to customize this operation.
+
+
+
+
@@ -37,6 +37,60 @@ It is possible to customize the list of providers for `clusterctl` by changing t
+#### Adding a provider to clusterctl
+
+As a Cluster API project, we have always been more than happy to give visibility to all the open source CAPI providers
+by allowing provider maintainers to add their own project to the pre-defined list of providers shipped with `clusterctl`.
+
+
+
+This is the process to add a new provider to the pre-defined list of providers shipped with `clusterctl`:
+- As soon as possible, create an issue in the [Cluster API repository](https://sigs.k8s.io/cluster-api) declaring the intent to add a new provider;
+  each provider must have a unique name & type in the pre-defined list of providers shipped with `clusterctl`; the provider's name
+  must be declared in the issue above and abide by the following naming convention:
+  - The name must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character.
+  - The name length should not exceed 63 characters.
+  - For providers not in the kubernetes-sigs org, in order to prevent conflicts the `clusterctl` name must be prefixed with
+    the provider's GitHub org name followed by `-` (see note below).
+- Create a PR making the necessary changes to clusterctl and the Cluster API book, e.g. [#9798](https://github.com/kubernetes-sigs/cluster-api/pull/9798),
+  [9720](https://github.com/kubernetes-sigs/cluster-api/pull/9720/files).
+
+The Cluster API maintainers will review issues/PRs for adding new providers. If the PR merges before the code freeze deadline
+for the next Cluster API minor release, the changes will be included in that release, otherwise in the next minor
+release. Maintainers will also consider whether it is possible/convenient to backport to the current Cluster API minor release
+branch to include it in the next patch release.
+
+
+
+
+
#### Creating a provider repository on GitHub

You can use a GitHub release to package your provider artifacts for other people to use.

@@ -170,6 +224,7 @@ It is strongly recommended that:
* Control plane providers release a file called `control-plane-components.yaml`
* IPAM providers release a file called `ipam-components.yaml`
* Runtime extensions providers release a file called `runtime-extension-components.yaml`
+* Add-on providers release a file called `addon-components.yaml`

#### Target namespace

@@ -250,17 +305,23 @@ providers.
| CABPK | cluster.x-k8s.io/provider=bootstrap-kubeadm | | CABPM | cluster.x-k8s.io/provider=bootstrap-microk8s | | CABPKK3S | cluster.x-k8s.io/provider=bootstrap-kubekey-k3s | +| CABPOCNE | cluster.x-k8s.io/provider=bootstrap-ocne | +| CABPK0S | cluster.x-k8s.io/provider=bootstrap-k0smotron | | CACPK | cluster.x-k8s.io/provider=control-plane-kubeadm | | CACPM | cluster.x-k8s.io/provider=control-plane-microk8s | | CACPN | cluster.x-k8s.io/provider=control-plane-nested | | CACPKK3S | cluster.x-k8s.io/provider=control-plane-kubekey-k3s | +| CACPOCNE | cluster.x-k8s.io/provider=control-plane-ocne | +| CACPK0S | cluster.x-k8s.io/provider=control-plane-k0smotron | | CAPA | cluster.x-k8s.io/provider=infrastructure-aws | | CAPB | cluster.x-k8s.io/provider=infrastructure-byoh | | CAPC | cluster.x-k8s.io/provider=infrastructure-cloudstack | | CAPD | cluster.x-k8s.io/provider=infrastructure-docker | +| CAPIM | cluster.x-k8s.io/provider=infrastructure-in-memory | | CAPDO | cluster.x-k8s.io/provider=infrastructure-digitalocean | | CAPG | cluster.x-k8s.io/provider=infrastructure-gcp | | CAPH | cluster.x-k8s.io/provider=infrastructure-hetzner | +| CAPHV | cluster.x-k8s.io/provider=infrastructure-hivelocity | | CAPIBM | cluster.x-k8s.io/provider=infrastructure-ibmcloud | | CAPKK | cluster.x-k8s.io/provider=infrastructure-kubekey | | CAPK | cluster.x-k8s.io/provider=infrastructure-kubevirt | @@ -269,12 +330,16 @@ providers. | CAPO | cluster.x-k8s.io/provider=infrastructure-openstack | | CAPOCI | cluster.x-k8s.io/provider=infrastructure-oci | | CAPP | cluster.x-k8s.io/provider=infrastructure-packet | +| CAPT | cluster.x-k8s.io/provider=infrastructure-tinkerbell | | CAPV | cluster.x-k8s.io/provider=infrastructure-vsphere | | CAPVC | cluster.x-k8s.io/provider=infrastructure-vcluster | | CAPVCD | cluster.x-k8s.io/provider=infrastructure-vcd | | CAPX | cluster.x-k8s.io/provider=infrastructure-nutanix | | CAPZ | cluster.x-k8s.io/provider=infrastructure-azure | | CAPOSC | cluster.x-k8s.io/provider=infrastructure-outscale | +| CAPK0S | cluster.x-k8s.io/provider=infrastructure-k0smotron | +| CAIPAMIC | cluster.x-k8s.io/provider=ipam-in-cluster | + ### Workload cluster templates An infrastructure provider could publish a **cluster templates** file to be used by `clusterctl generate cluster`. @@ -464,7 +529,9 @@ If moving some of excluded object is required, the provider authors should creat exact move sequence to be executed by the user. Additionally, provider authors should be aware that `clusterctl move` assumes all the provider's Controllers respect the -`Cluster.Spec.Paused` field introduced in the v1alpha3 Cluster API specification. +`Cluster.Spec.Paused` field introduced in the v1alpha3 Cluster API specification. If a provider needs to perform extra work in response to a +cluster being paused, `clusterctl move` can be blocked from creating any resources on the destination +management cluster by annotating any resource to be moved with `clusterctl.cluster.x-k8s.io/block-move`. 
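To make the block-move mechanism above concrete, a minimal sketch (resource kind and name are placeholders; the annotation's presence, not its value, is what blocks the move):

```bash
# Hypothetical resource: block clusterctl move while this object cannot yet be safely paused.
kubectl annotate dockercluster my-cluster clusterctl.cluster.x-k8s.io/block-move=""
```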
diff --git a/docs/book/src/developer/repository-layout.md b/docs/book/src/developer/repository-layout.md
index 7eb2ddf35e0b..69401542c7e1 100644
--- a/docs/book/src/developer/repository-layout.md
+++ b/docs/book/src/developer/repository-layout.md
@@ -94,7 +94,7 @@ This folder contains resources which are not meant to be used directly by users

[~/controllers](https://github.com/kubernetes-sigs/cluster-api/tree/main/controllers)

-This folder contains reconciler types which provide access to CAPI controllers present in [~/internal/controllers](https://github.com/kubernetes-sigs/cluster-api/tree/main/internal/controllers) directory to our users. These types can be used users by users to run any of the Cluster API controllers in an external program.
+This folder contains reconciler types which provide access to CAPI controllers present in [~/internal/controllers](https://github.com/kubernetes-sigs/cluster-api/tree/main/internal/controllers) directory to our users. These types can be used by users to run any of the Cluster API controllers in an external program.

### Documentation

diff --git a/docs/book/src/developer/testing.md b/docs/book/src/developer/testing.md
index 07de976898e2..7c8333566c01 100644
--- a/docs/book/src/developer/testing.md
+++ b/docs/book/src/developer/testing.md
@@ -95,7 +95,7 @@ but in this case the distinctive value of the two layers of testing is determine

Run `make test` to execute all unit and integration tests.

-Integration tests use the [envtest](https://github.com/kubernetes-sigs/controller-runtime/blob/master/pkg/envtest/doc.go) test framework. The tests need to know the location of the executables called by the framework. The `make test` target installs these executables, and passes this location to the tests as an environment variable.
+Integration tests use the [envtest](https://github.com/kubernetes-sigs/controller-runtime/blob/main/pkg/envtest/doc.go) test framework. The tests need to know the location of the executables called by the framework. The `make test` target installs these executables, and passes this location to the tests as an environment variable.

@@ -315,7 +315,7 @@ analyzing them via Grafana.

Just click on the downwards arrow, enter either a ProwJob URL, a GCS path or a local folder and click on `Import Logs`. This will retrieve the logs and push them to Loki. Alternatively, the logs can be imported via:
```bash
- go run ./hack/tools/log-push --log-path=
+ go run ./hack/tools/internal/log-push --log-path=
```
Examples for log paths:
* ProwJob URL: `https://prow.k8s.io/view/gs/kubernetes-jenkins/pr-logs/pull/kubernetes-sigs_cluster-api/6189/pull-cluster-api-e2e-main/1496954690603061248`

@@ -338,6 +338,24 @@ analyzing them via Grafana.

+As an alternative to Loki, JSON logs can be visualized with a human-readable timestamp using `jq`:
+
+1. Browse the ProwJob artifacts and download the desired logfile.
+2. Use `jq` to query the logs:
+
+   ```bash
+   cat manager.log \
+     | grep -v "TLS handshake error" \
+     | jq -r '(.ts / 1000 | todateiso8601) + " " + (. | tostring)'
+   ```
+
+   The `(. | tostring)` part could also be customized to only output parts of the JSON logline.
+   E.g.:
+
+   * `(.err)` to only output the error message part.
+   * `(.msg)` to only output the message part.
+   * `(.controller + " " + .msg)` to output the controller name and message part.
+
### Known Issues

#### Building images on SELinux

@@ -559,8 +577,8 @@ In Cluster API Unit and integration test MUST use [go test].
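For readers unfamiliar with the [Gomega] assertion style required here, a minimal, self-contained sketch (the test name and assertion are illustrative, not from the codebase):

```go
package example

import (
	"testing"

	. "github.com/onsi/gomega"
)

// TestExample shows the plain `go test` + Gomega pattern: NewWithT binds
// Gomega assertions to the testing.T so failures are reported through it.
func TestExample(t *testing.T) {
	g := NewWithT(t)

	got := 1 + 1
	g.Expect(got).To(Equal(2))
}
```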
[Gomega]: https://onsi.github.io/gomega/
[go test]: https://golang.org/pkg/testing/
[controller-runtime]: https://github.com/kubernetes-sigs/controller-runtime
-[envtest]: https://github.com/kubernetes-sigs/controller-runtime/tree/master/pkg/envtest
-[fakeclient]: https://github.com/kubernetes-sigs/controller-runtime/tree/master/pkg/client/fake
+[envtest]: https://github.com/kubernetes-sigs/controller-runtime/tree/main/pkg/envtest
+[fakeclient]: https://github.com/kubernetes-sigs/controller-runtime/tree/main/pkg/client/fake
[test/helpers]: https://github.com/kubernetes-sigs/cluster-api/tree/main/test/helpers
-[vscode-go]: https://marketplace.visualstudio.com/items?itemName=golang.Go
\ No newline at end of file
+[vscode-go]: https://marketplace.visualstudio.com/items?itemName=golang.Go
diff --git a/docs/book/src/developer/tilt.md b/docs/book/src/developer/tilt.md
index b8971ed75983..a0a3774fe971 100644
--- a/docs/book/src/developer/tilt.md
+++ b/docs/book/src/developer/tilt.md
@@ -8,14 +8,14 @@ workflow that offers easy deployments and rapid iterative builds.

## Prerequisites

1. [Docker](https://docs.docker.com/install/): v19.03 or newer
-1. [kind](https://kind.sigs.k8s.io): v0.17 or newer
-1. [Tilt](https://docs.tilt.dev/install.html): v0.30.8 or newer
-1. [kustomize](https://github.com/kubernetes-sigs/kustomize): provided via `make kustomize`
-1. [envsubst](https://github.com/drone/envsubst): provided via `make envsubst`
-1. [helm](https://github.com/helm/helm): v3.7.1 or newer
-1. Clone the [Cluster API](https://github.com/kubernetes-sigs/cluster-api) repository
+2. [kind](https://kind.sigs.k8s.io): v0.23.0 or newer
+3. [Tilt](https://docs.tilt.dev/install.html): v0.30.8 or newer
+4. [kustomize](https://github.com/kubernetes-sigs/kustomize): provided via `make kustomize`
+5. [envsubst](https://github.com/drone/envsubst): provided via `make envsubst`
+6. [helm](https://github.com/helm/helm): v3.7.1 or newer
+7. Clone the [Cluster API](https://github.com/kubernetes-sigs/cluster-api) repository
locally
-1. Clone the provider(s) you want to deploy locally as well
+8. Clone the provider(s) you want to deploy locally as well

## Getting started

@@ -69,8 +69,10 @@ If you prefer JSON, you can create a `tilt-settings.json` file instead. YAML wil

**allowed_contexts** (Array, default=[]): A list of kubeconfig contexts Tilt is allowed to use. See the Tilt documentation on [allow_k8s_contexts](https://docs.tilt.dev/api.html#api.allow_k8s_contexts) for more details.

**default_registry** (String, default=""): The image registry to use if you need to push images. See the [Tilt documentation](https://docs.tilt.dev/api.html#api.default_registry) for more details.
+Please note that, in case you are not using a local registry, this value is required; additionally, the Cluster API
+Tiltfile protects you from accidental push on `gcr.io/k8s-staging-cluster-api`.

**build_engine** (String, default="docker"): The engine used to build images. Can either be `docker` or `podman`. NB: the default is dynamic and will be "podman" if the string "Podman Engine" is found in `docker version` (or in `podman version` if the command fails).

@@ -106,10 +108,9 @@ provider's yaml.
These substitutions are also used when deploying cluster templa ```yaml kustomize_substitutions: CLUSTER_TOPOLOGY: "true" - EXP_MACHINE_POOL: "true" - EXP_CLUSTER_RESOURCE_SET: "true" EXP_KUBEADM_BOOTSTRAP_FORMAT_IGNITION: "true" EXP_RUNTIME_SDK: "true" + EXP_MACHINE_SET_PREFLIGHT_CHECKS: "true" ``` {{#tabs name:"tab-tilt-kustomize-substitution" tabs:"AWS,Azure,DigitalOcean,GCP,vSphere"}} @@ -199,15 +200,25 @@ Important! This feature requires the `helm` command to be available in the user' Supported values are: - * `grafana`*: To create dashboards and query `loki` as well as `prometheus`. + * `grafana`*: To create dashboards and query `loki`, `prometheus` and `tempo`. * `kube-state-metrics`: For exposing metrics for Kubernetes and CAPI resources to `prometheus`. * `loki`: To receive and store logs. + * `metrics-server`: To enable `kubectl top node/pod`. * `prometheus`*: For collecting metrics from Kubernetes. * `promtail`: For providing pod logs to `loki`. + * `parca`*: For visualizing profiling data. + * `tempo`: To store traces. * `visualizer`*: Visualize Cluster API resources for each cluster, provide quick access to the specs and status of any resource. \*: Note: the UI will be accessible via a link in the tilt console +**additional_kustomizations** (map[string]string, default={}): If set, install the additional resources built using kustomize to the cluster. +Example: +```yaml +additional_kustomizations: + capv-metrics: ../cluster-api-provider-vsphere/config/metrics +``` + **debug** (Map{string: Map} default{}): A map of named configurations for the provider. The key is the name of the provider. Supported settings: @@ -323,10 +334,11 @@ Custom values for variable substitutions can be set using `kustomize_substitutio ```yaml kustomize_substitutions: - NAMESPACE: default - KUBERNETES_VERSION: v1.26.0 - CONTROL_PLANE_MACHINE_COUNT: 1 - WORKER_MACHINE_COUNT: 3 + NAMESPACE: "default" + KUBERNETES_VERSION: "v1.30.0" + CONTROL_PLANE_MACHINE_COUNT: "1" + WORKER_MACHINE_COUNT: "3" +# Note: kustomize substitutions expects the values to be strings. This can be achieved by wrapping the values in quotation marks. ``` ### Cleaning up your kind cluster and development environment @@ -363,6 +375,7 @@ The following providers are currently defined in the Tiltfile: * **kubeadm-bootstrap**: kubeadm bootstrap provider * **kubeadm-control-plane**: kubeadm control-plane provider * **docker**: Docker infrastructure provider +* **in-memory**: In-memory infrastructure provider * **test-extension**: Runtime extension used by CAPI E2E tests Additional providers can be added by following the procedure described in following paragraphs: @@ -414,7 +427,13 @@ COPY --from=tilt-helper /usr/bin/docker /usr/bin/docker COPY --from=tilt-helper /go/kubernetes/client/bin/kubectl /usr/bin/kubectl ``` -**kustomize_config** (Bool, default=true): Whether or not running kustomize on the ./config folder of the provider. +**kustomize_folder** (String, default=config/default): The folder where the kustomize file for a provider +is defined; the path is relative to the provider root folder. + +**kustomize_options** ([]String, default=[]): Options to be applied when running kustomize for generating the +yaml manifest for a provider. e.g. `"kustomize_options": [ "--load-restrictor=LoadRestrictionsNone" ]` + +**apply_provider_yaml** (Bool, default=true): Whether to apply the provider yaml. Set to `false` if your provider does not have a ./config folder or you do not want it to be applied in the cluster. 
**go_main** (String, default="main.go"): The go main file if not located at the root of the folder

@@ -477,8 +496,70 @@ syntax highlighting and auto-formatting. To enable it for Tiltfile a file associ

[Podman](https://podman.io) can be used instead of Docker by following these actions:

-1. Enable the podman unix socket (eg. `systemctl --user enable --now podman.socket` on Fedora)
+1. Enable the podman unix socket:
+   - on Linux/systemd: `systemctl --user enable --now podman.socket`
+   - on macOS: create a podman machine with `podman machine init`
1. Set `build_engine` to `podman` in `tilt-settings.yaml` (optional, only if both Docker & podman are installed)
-1. Define the env variable `DOCKER_HOST` to the right socket while running tilt (eg. `DOCKER_HOST=unix:///run/user/$(id -u)/podman/podman.sock tilt up`)
+1. Define the env variable `DOCKER_HOST` to the right socket:
+   - on Linux/systemd: `export DOCKER_HOST=unix:///run/user/$(id -u)/podman/podman.sock`
+   - on macOS: `export DOCKER_HOST=$(podman machine inspect | jq -r '.[0].ConnectionInfo.PodmanSocket.Path')` where `` is the podman machine name
+1. Run `tilt up`
+
+NB: The socket defined by `DOCKER_HOST` is used only for the `hack/tools/internal/tilt-prepare` command; the image build runs the `podman build`/`podman push` commands.
+
+## Troubleshooting Tilt
+
+### Tilt is stuck
+
+Sometimes tilt looks stuck when it's waiting on connections.
+
+Ensure that docker/podman is up and running and your kubernetes cluster is reachable.
+
+### Errors running tilt-prepare
+
+#### `failed to get current context from the KubeConfig file`
+
+- Ensure the cluster in the default context is reachable by running `kubectl cluster-info`
+- Switch to the right context with `kubectl config use-context`
+- Ensure the context is allowed, see [**allowed_contexts** field](#tilt-settings-fields)
+
+#### `Cannot connect to the Docker daemon`
+
+- Ensure the docker daemon is running ;) or for podman see [Using Podman](#using-podman)
+- If a DOCKER_HOST is specified:
+  - check that the DOCKER_HOST has the correct prefix (usually `unix://`)
+  - ensure docker/podman is listening on $DOCKER_HOST using `fuser` / `lsof` / `netstat -u`
+
+### Errors pulling/pushing to the registry
+
+#### `connection refused` / `denied` / `not found`
+
+Ensure the [**default_registry** field](#tilt-settings-fields) is a valid registry where you can pull and push images.
+
+#### `server gave HTTP response to HTTPS client`
+
+By default all registries except localhost:5000 are accessed via HTTPS.
+
+If you run an HTTP registry, you may have to configure the registry in docker/podman.
+
+For example, in podman a `localhost:5001` registry configuration should be declared in `/etc/containers/registries.conf.d` with this content:
+````
+[[registry]]
+location = "localhost:5001"
+insecure = true
+````
+
+NB: on macOS this configuration should be done **in the podman machine** by running `podman machine ssh `.
+
+### Errors loading images in kind
+
+You may try to load images into kind manually by running:
+````
+kind load docker-image --name=
+````
+
+#### `image: "..." not present locally`
+
+If you are running podman, you may have hit this bug: https://github.com/kubernetes-sigs/kind/issues/2760
-NB: The socket defined by `DOCKER_HOST` is used only for the `hack/tools/tilt-prepare` command, the image build is running the `podman build`/`podman push` commands.
+The workaround is to create a `docker` symlink to your `podman` executable and try to load the images again.
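A hedged sketch of that symlink workaround (the symlink location, image, and cluster name are placeholders; adjust for your system):

```bash
# Make `docker` resolve to podman so kind's image loading works,
# then retry loading the image into the kind cluster.
ln -s "$(command -v podman)" /usr/local/bin/docker
kind load docker-image my-image:dev --name=my-cluster
```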
diff --git a/docs/book/src/images/bootstrap-controller.png b/docs/book/src/images/bootstrap-controller.png index de7dbd11c0e1..242385e69001 100644 Binary files a/docs/book/src/images/bootstrap-controller.png and b/docs/book/src/images/bootstrap-controller.png differ diff --git a/docs/book/src/images/bootstrap-provider.png b/docs/book/src/images/bootstrap-provider.png index c7eff2d583bb..086e1f076dd6 100644 Binary files a/docs/book/src/images/bootstrap-provider.png and b/docs/book/src/images/bootstrap-provider.png differ diff --git a/docs/book/src/images/cluster-admission-cluster-controller.png b/docs/book/src/images/cluster-admission-cluster-controller.png index 4678779b73da..31d75eacae5e 100644 Binary files a/docs/book/src/images/cluster-admission-cluster-controller.png and b/docs/book/src/images/cluster-admission-cluster-controller.png differ diff --git a/docs/book/src/images/cluster-admission-machine-controller.png b/docs/book/src/images/cluster-admission-machine-controller.png index 4952fe2187fe..67c21f1d6328 100644 Binary files a/docs/book/src/images/cluster-admission-machine-controller.png and b/docs/book/src/images/cluster-admission-machine-controller.png differ diff --git a/docs/book/src/images/cluster-admission-machinedeployment-controller.plantuml b/docs/book/src/images/cluster-admission-machinedeployment-controller.plantuml index bc3b1134fee8..be244476d42a 100644 --- a/docs/book/src/images/cluster-admission-machinedeployment-controller.plantuml +++ b/docs/book/src/images/cluster-admission-machinedeployment-controller.plantuml @@ -21,6 +21,7 @@ repeat else if (RollingUpdate Deployment Strategy) then (yes) :Select newest MachineSet; + :Propagate in-place changes to newest MachineSet; if (Too Many replicas) then (yes) #LightBlue:Scale machineSet down; elseif (Not Enough Replicas) @@ -33,6 +34,7 @@ repeat endif elseif (OnDelete Deployment Strategy) then (yes) :Select newest MachineSet; + :Propagate in-place changes to newest MachineSet; if (Too Many replicas) then (yes) #LightBlue:Scale machineSet down; elseif (Not Enough Replicas) diff --git a/docs/book/src/images/cluster-admission-machinedeployment-controller.png b/docs/book/src/images/cluster-admission-machinedeployment-controller.png index 76fdab31da2d..dcc81260a1d6 100644 Binary files a/docs/book/src/images/cluster-admission-machinedeployment-controller.png and b/docs/book/src/images/cluster-admission-machinedeployment-controller.png differ diff --git a/docs/book/src/images/cluster-admission-machinepool-controller.png b/docs/book/src/images/cluster-admission-machinepool-controller.png index b2f347f501d9..892393c7ea6f 100644 Binary files a/docs/book/src/images/cluster-admission-machinepool-controller.png and b/docs/book/src/images/cluster-admission-machinepool-controller.png differ diff --git a/docs/book/src/images/cluster-admission-machineset-controller.plantuml b/docs/book/src/images/cluster-admission-machineset-controller.plantuml index b19c35110b8c..5f44fb88ba82 100644 --- a/docs/book/src/images/cluster-admission-machineset-controller.plantuml +++ b/docs/book/src/images/cluster-admission-machineset-controller.plantuml @@ -11,6 +11,7 @@ repeat #LightBlue:Adopt machine; endif repeat while (More machines) is (yes) + :Propagate in-place changes to existing Machines; if (Not enough replicas) then (yes) #LightBlue:Boot new machine; elseif (Too many replicas) then (yes) diff --git a/docs/book/src/images/cluster-admission-machineset-controller.png b/docs/book/src/images/cluster-admission-machineset-controller.png index 
3b70a8daa736..4f19976661b0 100644 Binary files a/docs/book/src/images/cluster-admission-machineset-controller.png and b/docs/book/src/images/cluster-admission-machineset-controller.png differ diff --git a/docs/book/src/images/cluster-infra-provider.png b/docs/book/src/images/cluster-infra-provider.png index d8bbca0ca5e7..567f10a01693 100644 Binary files a/docs/book/src/images/cluster-infra-provider.png and b/docs/book/src/images/cluster-infra-provider.png differ diff --git a/docs/book/src/images/cluster-resource-set-controller.png b/docs/book/src/images/cluster-resource-set-controller.png index fa456a18010e..9a85c12734c0 100644 Binary files a/docs/book/src/images/cluster-resource-set-controller.png and b/docs/book/src/images/cluster-resource-set-controller.png differ diff --git a/docs/book/src/images/cluster-topology-controller.png b/docs/book/src/images/cluster-topology-controller.png index 93e2eedd2e37..8a3358f7129b 100644 Binary files a/docs/book/src/images/cluster-topology-controller.png and b/docs/book/src/images/cluster-topology-controller.png differ diff --git a/docs/book/src/images/cluster-topology-reconciller.png b/docs/book/src/images/cluster-topology-reconciller.png index c0c010bdf2d6..d875df566b0e 100644 Binary files a/docs/book/src/images/cluster-topology-reconciller.png and b/docs/book/src/images/cluster-topology-reconciller.png differ diff --git a/docs/book/src/images/control-plane-controller.png b/docs/book/src/images/control-plane-controller.png index b768a8f04d5a..2693c13bcf57 100644 Binary files a/docs/book/src/images/control-plane-controller.png and b/docs/book/src/images/control-plane-controller.png differ diff --git a/docs/book/src/images/kubeadm-control-plane-machines-resources.png b/docs/book/src/images/kubeadm-control-plane-machines-resources.png index 91784a6b66ac..160dbfc22fd9 100644 Binary files a/docs/book/src/images/kubeadm-control-plane-machines-resources.png and b/docs/book/src/images/kubeadm-control-plane-machines-resources.png differ diff --git a/docs/book/src/images/machine-infra-provider.png b/docs/book/src/images/machine-infra-provider.png index 38ef7da809c4..792279645b9a 100644 Binary files a/docs/book/src/images/machine-infra-provider.png and b/docs/book/src/images/machine-infra-provider.png differ diff --git a/docs/book/src/images/machinehealthcheck-controller.png b/docs/book/src/images/machinehealthcheck-controller.png index 38672cd8f747..78f235b3b537 100644 Binary files a/docs/book/src/images/machinehealthcheck-controller.png and b/docs/book/src/images/machinehealthcheck-controller.png differ diff --git a/docs/book/src/images/management-workload-same-cluster.png b/docs/book/src/images/management-workload-same-cluster.png index 5e25f3bba209..ac7bfe4a235d 100644 Binary files a/docs/book/src/images/management-workload-same-cluster.png and b/docs/book/src/images/management-workload-same-cluster.png differ diff --git a/docs/book/src/images/management-workload-separate-clusters.png b/docs/book/src/images/management-workload-separate-clusters.png index 325fe5802af9..b10623077f56 100644 Binary files a/docs/book/src/images/management-workload-separate-clusters.png and b/docs/book/src/images/management-workload-separate-clusters.png differ diff --git a/docs/book/src/images/metadata-propagation.jpg b/docs/book/src/images/metadata-propagation.jpg new file mode 100644 index 000000000000..a10bdc4fb4c1 Binary files /dev/null and b/docs/book/src/images/metadata-propagation.jpg differ diff --git a/docs/book/src/images/metadata-propagation.pptx 
b/docs/book/src/images/metadata-propagation.pptx new file mode 100644
index 000000000000..cdf43e37e684
Binary files /dev/null and b/docs/book/src/images/metadata-propagation.pptx differ
diff --git a/docs/book/src/images/runtime-sdk-topology-mutation.png b/docs/book/src/images/runtime-sdk-topology-mutation.png
index cd1804bea312..92da0abe6f53 100644
Binary files a/docs/book/src/images/runtime-sdk-topology-mutation.png and b/docs/book/src/images/runtime-sdk-topology-mutation.png differ
diff --git a/docs/book/src/images/worker-machines-resources.png b/docs/book/src/images/worker-machines-resources.png
index d710d4d91582..0e2d05971aa8 100644
Binary files a/docs/book/src/images/worker-machines-resources.png and b/docs/book/src/images/worker-machines-resources.png differ
diff --git a/docs/book/src/introduction.md b/docs/book/src/introduction.md
index 2c78fed7c50f..d62b258204c6 100644
--- a/docs/book/src/introduction.md
+++ b/docs/book/src/introduction.md
@@ -7,15 +7,13 @@ Started by the Kubernetes Special Interest Group (SIG) [Cluster Lifecycle](https

## ⚠️ Breaking Changes ⚠️

+Also, this [guide](https://github.com/kubernetes/registry.k8s.io/tree/main/docs/mirroring) provides instructions about how to identify images to mirror and how to use mirrored images.

## Getting started

@@ -29,8 +27,11 @@ contributing guide for more details.
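If you rely on mirrored images per the guide mentioned above, one place they can be wired in is the image overrides of the clusterctl configuration; a minimal sketch (the registry name is a placeholder):

```yaml
# $XDG_CONFIG_HOME/cluster-api/clusterctl.yaml
images:
  all:
    repository: myregistry.example.com/cluster-api
```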

ClusterAPI documentation versions

-This book documents ClusterAPI v1.4. For other Cluster API versions please see the corresponding documentation: +This book documents ClusterAPI v1.7. For other Cluster API versions please see the corresponding documentation: * [main.cluster-api.sigs.k8s.io](https://main.cluster-api.sigs.k8s.io) +* [release-1-6.cluster-api.sigs.k8s.io](https://release-1-6.cluster-api.sigs.k8s.io) +* [release-1-5.cluster-api.sigs.k8s.io](https://release-1-5.cluster-api.sigs.k8s.io) +* [release-1-4.cluster-api.sigs.k8s.io](https://release-1-4.cluster-api.sigs.k8s.io) * [release-1-3.cluster-api.sigs.k8s.io](https://release-1-3.cluster-api.sigs.k8s.io) * [release-1-2.cluster-api.sigs.k8s.io](https://release-1-2.cluster-api.sigs.k8s.io) * [release-1-1.cluster-api.sigs.k8s.io](https://release-1-1.cluster-api.sigs.k8s.io) @@ -54,6 +55,23 @@ However, while kubeadm and other bootstrap providers reduce installation complex SIG Cluster Lifecycle began the Cluster API project as a way to address these gaps by building declarative, Kubernetes-style APIs, that automate cluster creation, configuration, and management. Using this model, Cluster API can also be extended to support any infrastructure provider (AWS, Azure, vSphere, etc.) or bootstrap provider (kubeadm is default) you need. See the growing list of [available providers](./reference/providers.md). -{{#include ../../scope-and-objectives.md:Goals}} +### Goals + +- To manage the lifecycle (create, scale, upgrade, destroy) of Kubernetes-conformant clusters using a declarative API. +- To work in different environments, both on-premises and in the cloud. +- To define common operations, provide a default implementation, and provide the ability to swap out implementations for alternative ones. +- To reuse and integrate existing ecosystem components rather than duplicating their functionality (e.g. node-problem-detector, cluster autoscaler, SIG-Multi-cluster). +- To provide a transition path for Kubernetes lifecycle products to adopt Cluster API incrementally. Specifically, existing cluster lifecycle management tools should be able to adopt Cluster API in a staged manner, over the course of multiple releases, or even adopting a subset of Cluster API. + +### Non-goals + +- To add these APIs to Kubernetes core (kubernetes/kubernetes). + - This API should live in a namespace outside the core and follow the best practices defined by api-reviewers, but is not subject to core-api constraints. +- To manage the lifecycle of infrastructure unrelated to the running of Kubernetes-conformant clusters. +- To force all Kubernetes lifecycle products (kOps, Kubespray, GKE, AKS, EKS, IKS etc.) to support or use these APIs. +- To manage non-Cluster API provisioned Kubernetes-conformant clusters. +- To manage a single cluster spanning multiple infrastructure providers. +- To configure a machine at any time other than create or upgrade. +- To duplicate functionality that exists or is coming to other tooling, e.g., updating kubelet configuration (c.f. dynamic kubelet configuration), or updating apiserver, controller-manager, scheduler configuration (c.f. component-config effort) after the cluster is deployed. {{#include ../../../README.md:Community}} diff --git a/docs/book/src/reference/glossary.md b/docs/book/src/reference/glossary.md index 0b77cdb02831..5f82b88d82ac 100644 --- a/docs/book/src/reference/glossary.md +++ b/docs/book/src/reference/glossary.md @@ -26,9 +26,9 @@ A temporary cluster that is used to provision a Target Management cluster. 
### Bootstrap provider Refers to a [provider](#provider) that implements a solution for the [bootstrap](#bootstrap) process. -Bootstrap provider's interaction with Cluster API is based on what is defined in the [Cluster API contract](#contract). +Bootstrap provider's interaction with Cluster API is based on what is defined in the [Cluster API contract](#contract). -See [CABPK](#cabpk). +See [CABPK](#cabpk). # C --- @@ -45,6 +45,12 @@ Cluster API Provider AWS ### CABPK Cluster API Bootstrap Provider Kubeadm +### CABPOCNE +Cluster API Bootstrap Provider Oracle Cloud Native Environment (OCNE) + +### CACPOCNE +Cluster API Control Plane Provider Oracle Cloud Native Environment (OCNE) + ### CAPC Cluster API Provider CloudStack @@ -60,9 +66,18 @@ Cluster API Google Cloud Provider ### CAPH Cluster API Provider Hetzner +### CAPHV +Cluster API Provider Hivelocity + ### CAPIBM Cluster API Provider IBM Cloud +### CAPIO +Cluster API Operator + +### CAPL +Cluster API Provider Akamai (Linode) + ### CAPM3 Cluster API Provider Metal3 @@ -81,12 +96,15 @@ Cluster API Provider Kubevirt ### CAPO Cluster API Provider OpenStack -## CAPOSC +### CAPOSC Cluster API Provider Outscale ### CAPOCI Cluster API Provider Oracle Cloud Infrastructure (OCI) +### CAPT +Cluster API Provider Tinkerbell + ### CAPV Cluster API Provider vSphere @@ -99,6 +117,9 @@ Cluster API Provider VMware Cloud Director ### CAPZ Cluster API Provider Azure +### CAIPAMIC +Cluster API IPAM Provider In Cluster + ### Cloud provider Or __Cloud service provider__ @@ -126,6 +147,12 @@ See [core provider](#core-provider) The Cluster API execution model, a set of controllers cooperating in managing the Kubernetes cluster lifecycle. +### Cluster Infrastructure + +or __Kubernetes Cluster Infrastructure__ + +Defines the **infrastructure that supports a Kubernetes cluster**, like e.g. VPC, security groups, load balancers, etc. Please note that in the context of managed Kubernetes some of those components are going to be provided by the corresponding abstraction for a specific Cloud provider (EKS, OKE, AKS etc), and thus Cluster API should not take care of managing a subset or all those components. + ### Contract Or __Cluster API contract__ @@ -149,7 +176,7 @@ See [KCP](#kcp). ### Core provider -Refers to a [provider](#provider) that implements Cluster API core controllers; if you +Refers to a [provider](#provider) that implements Cluster API core controllers; if you consider that the first project that must be deployed in a management Cluster is Cluster API itself, it should be clear why the Cluster API project is also referred to as the core provider. @@ -190,7 +217,7 @@ see [Server](#server) ### Infrastructure provider -Refers to a [provider](#provider) that implements provisioning of infrastructure/computational resources required by +Refers to a [provider](#provider) that implements provisioning of infrastructure/computational resources required by the Cluster or by Machines (e.g. VMs, networking, etc.). Infrastructure provider's interaction with Cluster API is based on what is defined in the [Cluster API contract](#contract). @@ -199,7 +226,7 @@ When there is more than one way to obtain resources from the same infrastructure For a complete list of providers see [Provider Implementations](providers.md). -### Inline patch +### Inline patch A [patch](#patch) defined inline in a [ClusterClass](#clusterclass). An alternative to an [external patch](#external-patch). 
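To make the inline-patch entry concrete, a hypothetical ClusterClass fragment (names, selector, and the patched field are illustrative) showing a patch defined inline rather than delegated to an external extension:

```yaml
apiVersion: cluster.x-k8s.io/v1beta1
kind: ClusterClass
metadata:
  name: example-clusterclass
spec:
  patches:
  - name: example-inline-patch
    definitions:
    - selector:
        apiVersion: controlplane.cluster.x-k8s.io/v1beta1
        kind: KubeadmControlPlaneTemplate
        matchResources:
          controlPlane: true
      jsonPatches:
      - op: add
        path: /spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/cloud-provider
        value: external
```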
@@ -263,6 +290,10 @@ See also: [Server](#server) Perform create, scale, upgrade, or destroy operations on the cluster. +### Managed Kubernetes + +Managed Kubernetes refers to any Kubernetes cluster provisioning and maintenance abstraction, usually exposed as an API, that is natively available in a Cloud provider. For example: [EKS](https://aws.amazon.com/eks/), [OKE](https://www.oracle.com/cloud/cloud-native/container-engine-kubernetes/), [AKS](https://azure.microsoft.com/en-us/products/kubernetes-service), [GKE](https://cloud.google.com/kubernetes-engine), [IBM Cloud Kubernetes Service](https://www.ibm.com/cloud/kubernetes-service), [DOKS](https://www.digitalocean.com/products/kubernetes), and many more throughout the Kubernetes Cloud Native ecosystem. + ### Managed Topology See [Topology](#topology) @@ -300,7 +331,7 @@ A generically understood combination of a kernel and system-level userspace inte # P --- -### Patch +### Patch A set of instructions describing modifications to a Kubernetes object. Examples include JSON Patch and JSON Merge Patch. diff --git a/docs/book/src/reference/jobs.md b/docs/book/src/reference/jobs.md index 04c31a2067d5..40570747b437 100644 --- a/docs/book/src/reference/jobs.md +++ b/docs/book/src/reference/jobs.md @@ -7,48 +7,62 @@ It also documents the cluster-api specific configuration in test-infra. > NOTE: To see which test jobs execute which tests or e2e tests, you can click on the links which lead to the respective test overviews in testgrid. -### Presubmits +The dashboards for the ProwJobs can be found here: https://testgrid.k8s.io/sig-cluster-lifecycle-cluster-api + +More details about ProwJob configurations can be found here: [cluster-api-prowjob-gen.yaml](https://github.com/kubernetes/test-infra/blob/master/config/jobs/kubernetes-sigs/cluster-api/cluster-api-prowjob-gen.yaml) -**Legend**: -* ✳️️ jobs that don't have to be run successfully for merge -* ✴️ jobs that are not triggered automatically for every commit +### Presubmits Prow Presubmits: -* [pull-cluster-api-build-main] `./scripts/ci-build.sh` -* ✳️️ [pull-cluster-api-apidiff-main] `./scripts/ci-apidiff.sh` -* [pull-cluster-api-verify-main] `./scripts/ci-verify.sh` -* [pull-cluster-api-test-main] `./scripts/ci-test.sh` -* [pull-cluster-api-test-mink8s-main] `./scripts/ci-test.sh` -* [pull-cluster-api-e2e-main] `./scripts/ci-e2e.sh` - * GINKGO_FOCUS: `[PR-Blocking]` -* ✳️️ [pull-cluster-api-e2e-informing-main] `./scripts/ci-e2e.sh` - * GINKGO_FOCUS: `[PR-Informing]`, GINKGO_SKIP: `[IPv6]` -* ✳️️ [pull-cluster-api-e2e-informing-ipv6-main] `./scripts/ci-e2e.sh` - * GINKGO_FOCUS: `[IPv6] [PR-Informing]`, IP_FAMILY: `IPv6` -* ✳️️ ✴️ [pull-cluster-api-e2e-full-main] `./scripts/ci-e2e.sh` - * GINKGO_SKIP: `[PR-Blocking] [Conformance] [K8s-Upgrade] [IPv6]` (i.e. 
"no tags") -* ✳️️ ✴️ [pull-cluster-api-e2e-workload-upgrade-1-26-latest-main] `./scripts/ci-e2e.sh` FROM: `stable-1.26` TO: `ci/latest-1.27` - * GINKGO_FOCUS: `[K8s-Upgrade]` +* mandatory for merge, always run: + * pull-cluster-api-build-main `./scripts/ci-build.sh` + * pull-cluster-api-verify-main `./scripts/ci-verify.sh` +* mandatory for merge, run if go code changes: + * pull-cluster-api-test-main `./scripts/ci-test.sh` + * pull-cluster-api-e2e-blocking-main `./scripts/ci-e2e.sh` + * GINKGO_FOCUS: `[PR-Blocking]` +* optional for merge, run if go code changes: + * pull-cluster-api-apidiff-main `./scripts/ci-apidiff.sh` +* mandatory for merge, run if manually triggered: + * pull-cluster-api-test-mink8s-main `./scripts/ci-test.sh` + * pull-cluster-api-e2e-mink8s-main `./scripts/ci-e2e.sh` + * GINKGO_SKIP: `[Conformance]|[IPv6]` + * pull-cluster-api-e2e-dualstack-and-ipv6-main `./scripts/ci-e2e.sh` + * DOCKER_IN_DOCKER_IPV6_ENABLED: `true` + * GINKGO_SKIP: `[Conformance]` + * pull-cluster-api-e2e-main `./scripts/ci-e2e.sh` + * GINKGO_SKIP: `[Conformance]|[IPv6]` + * pull-cluster-api-e2e-upgrade-* `./scripts/ci-e2e.sh` + * GINKGO_FOCUS: `[Conformance] [K8s-Upgrade]` + * pull-cluster-api-e2e-conformance-main `./scripts/ci-e2e.sh` + * GINKGO_FOCUS: `[Conformance] [K8s-Install]` + * pull-cluster-api-e2e-conformance-ci-latest-main `./scripts/ci-e2e.sh` + * GINKGO_FOCUS: `[Conformance] [K8s-Install-ci-latest]` GitHub Presubmit Workflows: -* golangci-lint: golangci/golangci-lint-action +* PR golangci-lint: golangci/golangci-lint-action * Runs golangci-lint. Can be run locally via `make lint`. -* verify: kubernetes-sigs/kubebuilder-release-tools verifier +* PR verify: kubernetes-sigs/kubebuilder-release-tools verifier * Verifies the PR titles have a valid format, i.e. contains one of the valid icons. * Verifies the PR description is valid, i.e. is long enough. -* Check PR Markdown links (run when markdown files changed) - * Checks markdown for broken links. -* dependabot (run on dependabot PRs) +* PR check Markdown links (run when markdown files changed) + * Checks markdown modified in PR for broken links. +* PR dependabot (run on dependabot PRs) * Regenerates Go modules and code. -* release (run on tags) - * Creates a GitHub release with release notes for the tag. - +* PR approve GH Workflows + * Approves other GH workflows if the `ok-to-test` label is set. GitHub Weekly Workflows: -* golangci-lint: golangci/golangci-lint-action - * Weekly check all Markdown links -* scan-images: +* Weekly check all Markdown links + * Checks markdown across the repo for broken links. +* Weekly image scan: * Scan all images for vulnerabilities. Can be run locally via `make verify-container-images` +* Weekly release test: + * Test the the `release` make target is working without errors. + +Other Github workflows +* release (runs when tags are pushed) + * Creates a GitHub release with release notes for the tag. 
### Postsubmits @@ -58,26 +72,21 @@ Prow Postsubmits: ### Periodics Prow Periodics: -* [periodic-cluster-api-test-main] `./scripts/ci-test.sh` -* [periodic-cluster-api-test-mink8s-main] `./scripts/ci-test.sh` - * KUBEBUILDER_ENVTEST_KUBERNETES_VERSION: `1.23.5` -* [periodic-cluster-api-e2e-main] `./scripts/ci-e2e.sh` - * GINKGO_SKIP: `[Conformance] [K8s-Upgrade]|[IPv6]` -* [periodic-cluster-api-e2e-mink8s-main] `./scripts/ci-e2e.sh` - * GINKGO_SKIP: `[Conformance] [K8s-Upgrade]|[IPv6]` - * KUBERNETES_VERSION_MANAGEMENT: `stable-1.23` -* [periodic-cluster-api-e2e-workload-upgrade-1-21-1-22-main] `./scripts/ci-e2e.sh` FROM: `stable-1.21` TO: `stable-1.22` - * GINKGO_FOCUS: `[K8s-Upgrade]` -* [periodic-cluster-api-e2e-workload-upgrade-1-22-1-23-main] `./scripts/ci-e2e.sh` FROM: `stable-1.22` TO: `stable-1.23` - * GINKGO_FOCUS: `[K8s-Upgrade]` -* [periodic-cluster-api-e2e-workload-upgrade-1-23-1-24-main] `./scripts/ci-e2e.sh` FROM: `stable-1.23` TO: `stable-1.24` - * GINKGO_FOCUS: `[K8s-Upgrade]` -* [periodic-cluster-api-e2e-workload-upgrade-1-24-1-25-main] `./scripts/ci-e2e.sh` FROM: `stable-1.24` TO: `stable-1.25` - * GINKGO_FOCUS: `[K8s-Upgrade]` -* [periodic-cluster-api-e2e-workload-upgrade-1-25-1-26-main] `./scripts/ci-e2e.sh` FROM: `stable-1.25` TO: `stable-1.26` - * GINKGO_FOCUS: `[K8s-Upgrade]` -* [periodic-cluster-api-e2e-workload-upgrade-1-26-latest-main] `./scripts/ci-e2e.sh` FROM: `stable-1.26` TO: `ci/latest-1.27` - * GINKGO_FOCUS: `[K8s-Upgrade]` +* periodic-cluster-api-test-main `./scripts/ci-test.sh` +* periodic-cluster-api-test-mink8s-main `./scripts/ci-test.sh` +* periodic-cluster-api-e2e-main `./scripts/ci-e2e.sh` + * GINKGO_SKIP: `[Conformance]|[IPv6]` +* periodic-cluster-api-e2e-mink8s-main `./scripts/ci-e2e.sh` + * GINKGO_SKIP: `[Conformance]|[IPv6]` +* periodic-cluster-api-e2e-dualstack-and-ipv6-main `./scripts/ci-e2e.sh` + * DOCKER_IN_DOCKER_IPV6_ENABLED: `true` + * GINKGO_SKIP: `[Conformance]` +* periodic-cluster-api-e2e-upgrade-* `./scripts/ci-e2e.sh` + * GINKGO_FOCUS: `[Conformance] [K8s-Upgrade]` +* periodic-cluster-api-e2e-conformance-main `./scripts/ci-e2e.sh` + * GINKGO_FOCUS: `[Conformance] [K8s-Install]` +* periodic-cluster-api-e2e-conformance-ci-latest-main `./scripts/ci-e2e.sh` + * GINKGO_FOCUS: `[Conformance] [K8s-Install-ci-latest]` * [cluster-api-push-images-nightly] Google Cloud Build: `make release-staging-nightly` ## Test-infra configuration @@ -103,27 +112,5 @@ Prow Periodics: -[pull-cluster-api-build-main]: https://testgrid.k8s.io/sig-cluster-lifecycle-cluster-api#capi-pr-build-main -[pull-cluster-api-apidiff-main]: https://testgrid.k8s.io/sig-cluster-lifecycle-cluster-api#capi-pr-apidiff-main -[pull-cluster-api-verify-main]: https://testgrid.k8s.io/sig-cluster-lifecycle-cluster-api#capi-pr-verify-main -[pull-cluster-api-test-main]: https://testgrid.k8s.io/sig-cluster-lifecycle-cluster-api#capi-pr-test-main -[pull-cluster-api-test-mink8s-main]: https://testgrid.k8s.io/sig-cluster-lifecycle-cluster-api#capi-pr-test-mink8s-main -[pull-cluster-api-e2e-main]: https://testgrid.k8s.io/sig-cluster-lifecycle-cluster-api#capi-pr-e2e-main -[pull-cluster-api-e2e-informing-main]: https://testgrid.k8s.io/sig-cluster-lifecycle-cluster-api#capi-pr-e2e-informing-main -[pull-cluster-api-e2e-informing-ipv6-main]: https://testgrid.k8s.io/sig-cluster-lifecycle-cluster-api#capi-pr-e2e-informing-ipv6-main -[pull-cluster-api-e2e-full-main]: https://testgrid.k8s.io/sig-cluster-lifecycle-cluster-api#capi-pr-e2e-full-main 
-[pull-cluster-api-e2e-workload-upgrade-1-26-latest-main]: https://testgrid.k8s.io/sig-cluster-lifecycle-cluster-api#capi-pr-e2e-main-1-26-latest -[periodic-cluster-api-test-main]: https://testgrid.k8s.io/sig-cluster-lifecycle-cluster-api#capi-test-main -[periodic-cluster-api-test-mink8s-main]: https://testgrid.k8s.io/sig-cluster-lifecycle-cluster-api#capi-test-mink8s-main -[periodic-cluster-api-e2e-main]: https://testgrid.k8s.io/sig-cluster-lifecycle-cluster-api#capi-e2e-main -[periodic-cluster-api-e2e-upgrade-v0-3-to-main]: https://testgrid.k8s.io/sig-cluster-lifecycle-cluster-api#capi-e2e-upgrade-v0-3-to-main -[periodic-cluster-api-e2e-upgrade-v1-0-to-main]: https://testgrid.k8s.io/sig-cluster-lifecycle-cluster-api#capi-e2e-upgrade-v1-0-to-main -[periodic-cluster-api-e2e-mink8s-main]: https://testgrid.k8s.io/sig-cluster-lifecycle-cluster-api#capi-e2e-mink8s-main -[periodic-cluster-api-e2e-workload-upgrade-1-21-1-22-main]: https://testgrid.k8s.io/sig-cluster-lifecycle-cluster-api#capi-e2e-main-1-21-1-22 -[periodic-cluster-api-e2e-workload-upgrade-1-22-1-23-main]: https://testgrid.k8s.io/sig-cluster-lifecycle-cluster-api#capi-e2e-main-1-22-1-23 -[periodic-cluster-api-e2e-workload-upgrade-1-23-1-24-main]: https://testgrid.k8s.io/sig-cluster-lifecycle-cluster-api#capi-e2e-main-1-23-1-24 -[periodic-cluster-api-e2e-workload-upgrade-1-24-1-25-main]: https://testgrid.k8s.io/sig-cluster-lifecycle-cluster-api#capi-e2e-main-1-24-1-25 -[periodic-cluster-api-e2e-workload-upgrade-1-25-1-26-main]: https://testgrid.k8s.io/sig-cluster-lifecycle-cluster-api#capi-e2e-main-1-25-1-26 -[periodic-cluster-api-e2e-workload-upgrade-1-26-latest-main]: https://testgrid.k8s.io/sig-cluster-lifecycle-cluster-api#capi-e2e-main-1-26-latest [cluster-api-push-images-nightly]: https://testgrid.k8s.io/sig-cluster-lifecycle-image-pushes#cluster-api-push-images-nightly [post-cluster-api-push-images]: https://testgrid.k8s.io/sig-cluster-lifecycle-image-pushes#post-cluster-api-push-images diff --git a/docs/book/src/reference/labels_and_annotations.md b/docs/book/src/reference/labels_and_annotations.md index ed3a67582b77..00221975156c 100644 --- a/docs/book/src/reference/labels_and_annotations.md +++ b/docs/book/src/reference/labels_and_annotations.md @@ -13,6 +13,7 @@ | cluster.x-k8s.io/set-name | It is set on machines if they're controlled by MachineSet. The value of this label may be a hash if the MachineSet name is longer than 63 characters. | | cluster.x-k8s.io/control-plane-name | It is set on machines if they're controlled by a control plane. The value of this label may be a hash if the control plane name is longer than 63 characters. | | cluster.x-k8s.io/deployment-name | It is set on machines if they're controlled by a MachineDeployment. | +| cluster.x-k8s.io/pool-name | It is set on machines if they're controlled by a MachinePool. | | machine-template-hash | It is applied to Machines in a MachineDeployment containing the hash of the template. |
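To make the label table above concrete, here is a hedged sketch of how several of these labels can appear together on a Machine owned by a MachineSet of a MachineDeployment (all object names below are hypothetical):

```yaml
apiVersion: cluster.x-k8s.io/v1beta1
kind: Machine
metadata:
  name: my-cluster-md-0-7d5f6-xk2pq                    # hypothetical Machine name
  labels:
    cluster.x-k8s.io/cluster-name: my-cluster          # cluster the Machine belongs to
    cluster.x-k8s.io/deployment-name: my-cluster-md-0  # owning MachineDeployment
    cluster.x-k8s.io/set-name: my-cluster-md-0-7d5f6   # owning MachineSet
```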
@@ -22,7 +23,10 @@ | Annotation | Note | |:-----------------------------------------------------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | clusterctl.cluster.x-k8s.io/skip-crd-name-preflight-check | Can be placed on provider CRDs, so that clusterctl doesn't emit an error if the CRD doesn't comply with Cluster APIs naming scheme. Only CRDs that are referenced by core Cluster API CRDs have to comply with the naming scheme. | +| clusterctl.cluster.x-k8s.io/delete-for-move | DeleteForMoveAnnotation will be set on objects that are going to be deleted from the source cluster after being moved to the target cluster during the clusterctl move operation. It helps validation webhooks make decisions based on it. | +| clusterctl.cluster.x-k8s.io/block-move | BlockMoveAnnotation prevents the cluster move operation from starting if it is defined on at least one of the objects in scope. Provider controllers are expected to set the annotation on resources that cannot be instantaneously paused and remove the annotation when the resource has been actually paused. | | unsafe.topology.cluster.x-k8s.io/disable-update-class-name-check | It can be used to disable the webhook check on update that disallows a pre-existing Cluster to be populated with Topology information and Class. | +| unsafe.topology.cluster.x-k8s.io/disable-update-version-check | It can be used to disable the webhook checks on update that disallow updating the `.topology.spec.version` under certain conditions. | | cluster.x-k8s.io/cluster-name | It is set on nodes identifying the name of the cluster the node belongs to. | | cluster.x-k8s.io/cluster-namespace | It is set on nodes identifying the namespace of the cluster the node belongs to. | | cluster.x-k8s.io/machine | It is set on nodes identifying the machine the node belongs to. | @@ -34,11 +38,14 @@ | cluster.x-k8s.io/cloned-from-name | It is the infrastructure machine annotation that stores the name of the infrastructure template resource that was cloned for the machine. This annotation is set only during cloning a template. Older/adopted machines will not have this annotation. | | cluster.x-k8s.io/cloned-from-groupkind | It is the infrastructure machine annotation that stores the group-kind of the infrastructure template resource that was cloned for the machine. This annotation is set only during cloning a template. Older/adopted machines will not have this annotation. | | cluster.x-k8s.io/skip-remediation | It is used to mark the machines that should not be considered for remediation by MachineHealthCheck reconciler. | +| cluster.x-k8s.io/remediate-machine | It can be applied to a machine to manually mark it for remediation by the MachineHealthCheck reconciler. | | cluster.x-k8s.io/managed-by | It can be applied to InfraCluster resources to signify that some external system is managing the cluster infrastructure. Provider InfraCluster controllers will ignore resources with this annotation. An external controller must fulfill the contract of the InfraCluster resource.
External infrastructure providers should ensure that the annotation, once set, cannot be removed. | | cluster.x-k8s.io/replicas-managed-by | It can be applied to MachinePool resources to signify that some external system is managing infrastructure scaling for that pool. See [the MachinePool documentation](../developer/architecture/controllers/machine-pool.md#externally-managed-autoscaler) for more details. | +| cluster.x-k8s.io/skip-machineset-preflight-checks | It can be applied on MachineDeployment and MachineSet resources to specify a comma-separated list of preflight checks that should be skipped during MachineSet reconciliation. Supported preflight checks are: All, KubeadmVersionSkew, KubernetesVersionSkew, ControlPlaneIsStable. | | topology.cluster.x-k8s.io/defer-upgrade | It can be used to defer the Kubernetes upgrade of a single MachineDeployment topology. If the annotation is set on a MachineDeployment topology in Cluster.spec.topology.workers, the Kubernetes upgrade for this MachineDeployment topology is deferred. It doesn't affect other MachineDeployment topologies. | | topology.cluster.x-k8s.io/dry-run | It is an annotation that gets set on objects by the topology controller only during a server side dry run apply operation. It is used for validating update webhooks for objects which get updated by template rotation (e.g. InfrastructureMachineTemplate). When the annotation is set and the admission request is a dry run, the webhook should deny validation due to immutability. By that the request will succeed (without any changes to the actual object because it is a dry run) and the topology controller will receive the resulting object. | | topology.cluster.x-k8s.io/hold-upgrade-sequence | It can be used to hold the entire MachineDeployment upgrade sequence. If the annotation is set on a MachineDeployment topology in Cluster.spec.topology.workers, the Kubernetes upgrade for this MachineDeployment topology and all subsequent ones is deferred. | +| topology.cluster.x-k8s.io/upgrade-concurrency | It can be used to configure the maximum concurrency while upgrading MachineDeployments of a classy Cluster. It is set as a top-level annotation on the Cluster object. The value should be >= 1. If unspecified, the upgrade concurrency will default to 1. | | machine.cluster.x-k8s.io/certificates-expiry | It captures the expiry date of the machine certificates in RFC3339 format. It is used to trigger rollout of control plane machines before certificates expire. It can be set on BootstrapConfig and Machine objects. The value set on Machine object takes precedence. The annotation is only used by control plane machines. | | machine.cluster.x-k8s.io/exclude-node-draining | It explicitly skips node draining if set. | | machine.cluster.x-k8s.io/exclude-wait-for-node-volume-detach | It explicitly skips the waiting for node volume detaching if set. | diff --git a/docs/book/src/reference/owner_references.md b/docs/book/src/reference/owner_references.md new file mode 100644 index 000000000000..bf01d4a9b8e2 --- /dev/null +++ b/docs/book/src/reference/owner_references.md @@ -0,0 +1,82 @@ +# Owner References + + +Cluster API uses [Kubernetes owner references](https://kubernetes.io/docs/concepts/overview/working-with-objects/owners-dependents/) to track relationships between objects. These references are used for Kubernetes garbage collection, which is the basis of Cluster deletion in CAPI. They are also used in places where the ownership hierarchy is important, for example when using `clusterctl move`.
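For illustration, the fragment below sketches what such an owner reference looks like on a Machine created by a MachineSet; the field layout follows the standard Kubernetes ownerReferences schema, and all names and the uid are hypothetical:

```yaml
apiVersion: cluster.x-k8s.io/v1beta1
kind: Machine
metadata:
  name: my-cluster-md-0-7d5f6-xk2pq     # hypothetical Machine name
  ownerReferences:
  - apiVersion: cluster.x-k8s.io/v1beta1
    kind: MachineSet
    name: my-cluster-md-0-7d5f6         # hypothetical owning MachineSet
    uid: 11111111-2222-3333-4444-555555555555   # hypothetical uid
    controller: true                    # the MachineSet is the managing controller
    blockOwnerDeletion: true
```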
+ +CAPI uses owner references in an opinionated way. The following guidelines should be considered: +1. Objects should always be created with an owner reference to prevent leaking objects. Initial ownerReferences can be replaced later where another object is a more appropriate owner. +2. Owner references should be re-reconciled if they are lost for an object. This is required as some tools - e.g. velero - may delete owner references on objects. +3. Owner references should be kept to the most recent apiVersion. + - This ensures garbage collection still works after an old apiVersion is no longer served. +4. Owner references should not be added unless required. + - Multiple owner references on a single object should be exceptional. + + + + +## Owner reference relationships in Cluster API + +The tables below map out a reference for the ownership relationships of the objects in a Cluster API cluster. The tables are identical for classy and non-classy clusters. + + +Providers may implement their own ownership relationships which may or may not map directly to the tables below. +These owner references are almost all tested in an [end-to-end test](https://github.com/kubernetes-sigs/cluster-api/blob/caaa74482b51fae777334cd7a29595da1c06481e/test/e2e/quick_start_test.go#L31). Lack of testing is noted where this is not the case. CAPI Providers can take advantage of the e2e test framework to ensure their owner references are predictable, documented and stable. + + Kubernetes core types + +| type | Owner | Controller | Note | +|-----------|---------------------|------------|--------------------------------------------| +| Secret | KubeadmControlPlane | yes | For cluster certificates | +| Secret | KubeadmConfig | yes | For bootstrap secrets | +| Secret | ClusterResourceSet | no | When referenced by CRS. Not tested in e2e.
| +| ConfigMap | ClusterResourceSet | no | When referenced by CRS | + +## Core types + +| type | Owner | Controller | Note | +|---------------------|---------------------|------------|----------------------------| +| ExtensionConfig | None | | | +| ClusterClass | None | | | +| Cluster | None | | | +| MachineDeployment | Cluster | no | | +| MachineSet | MachineDeployment | yes | | +| Machine | MachineSet | yes | When created by MachineSet | +| Machine | KubeadmControlPlane | yes | When created by KCP | +| MachineHealthCheck | Cluster | no | | + + + +## Experimental types +| type | Owner | Controller | Note | +|---------------------------|--------------------|------------|--------------------------| +| ClusterResourceSet | None | | | +| ClusterResourceSetBinding | ClusterResourceSet | no | May have many CRS owners | +| MachinePool | Cluster | no | | + + +## KubeadmControlPlane types +| type | Owner | Controller | Note | +|-----------------------------|--------------|------------|------| +| KubeadmControlPlane | Cluster | yes | | +| KubeadmControlPlaneTemplate | ClusterClass | no | | + + +## Kubeadm bootstrap types +| type | Owner | Controller | Note | +|-----------------------|--------------|------------|-------------------------------------------------| +| KubeadmConfig | Machine | yes | When created for Machine | +| KubeadmConfig | MachinePool | yes | When created for MachinePool | +| KubeadmConfigTemplate | Cluster | no | When referenced in MachineDeployment spec | +| KubeadmConfigTemplate | ClusterClass | no | When referenced in ClusterClass | + +## Infrastructure provider types +| type | Owner | Controller | Note | +|-------------------------------|--------------|------------|---------------------------------------------| +| InfrastructureMachine | Machine | yes | | +| InfrastructureMachineTemplate | Cluster | no | When created by cluster topology controller | +| InfrastructureMachineTemplate | ClusterClass | no | When referenced in a ClusterClass | +| InfrastructureCluster | Cluster | yes | | +| InfrastructureClusterTemplate | ClusterClass | no | | +| InfrastructureMachinePool | MachinePool | yes | | + + diff --git a/docs/book/src/reference/providers.md b/docs/book/src/reference/providers.md index d233738eb058..cd598ae10af2 100644 --- a/docs/book/src/reference/providers.md +++ b/docs/book/src/reference/providers.md @@ -9,15 +9,23 @@ updated info about which API version they are supporting.
- [Amazon Elastic Kubernetes Service (EKS)](https://github.com/kubernetes-sigs/cluster-api-provider-aws/tree/main/bootstrap/eks) - [Kubeadm](https://github.com/kubernetes-sigs/cluster-api/tree/main/bootstrap/kubeadm) - [MicroK8s](https://github.com/canonical/cluster-api-bootstrap-provider-microk8s) +- [Oracle Cloud Native Environment (OCNE)](https://github.com/verrazzano/cluster-api-provider-ocne) - [Talos](https://github.com/siderolabs/cluster-api-bootstrap-provider-talos) +- [K3s](https://github.com/cluster-api-provider-k3s/cluster-api-k3s) +- [k0smotron/k0s](https://github.com/k0sproject/k0smotron) ## Control Plane - [Kubeadm](https://github.com/kubernetes-sigs/cluster-api/tree/main/controlplane/kubeadm) - [MicroK8s](https://github.com/canonical/cluster-api-control-plane-provider-microk8s) - [Nested](https://github.com/kubernetes-sigs/cluster-api-provider-nested) +- [Oracle Cloud Native Environment (OCNE)](https://github.com/verrazzano/cluster-api-provider-ocne) - [Talos](https://github.com/siderolabs/cluster-api-control-plane-provider-talos) +- [Kamaji](https://github.com/clastix/cluster-api-control-plane-provider-kamaji) +- [K3s](https://github.com/cluster-api-provider-k3s/cluster-api-k3s) +- [k0smotron/k0s](https://github.com/k0sproject/k0smotron) ## Infrastructure +- [Akamai (Linode)](https://linode.github.io/cluster-api-provider-linode/) - [AWS](https://cluster-api-aws.sigs.k8s.io/) - [Azure](https://github.com/kubernetes-sigs/cluster-api-provider-azure) - [Azure Stack HCI](https://github.com/microsoft/cluster-api-provider-azurestackhci) @@ -26,8 +34,9 @@ updated info about which API version they are supporting. - [CoxEdge](https://github.com/coxedge/cluster-api-provider-coxedge) - [DigitalOcean](https://github.com/kubernetes-sigs/cluster-api-provider-digitalocean) - [Equinix Metal (formerly Packet)](https://github.com/kubernetes-sigs/cluster-api-provider-packet) -- [Google Cloud Platform (GCP)](https://github.com/kubernetes-sigs/cluster-api-provider-gcp) +- [Google Cloud Platform (GCP)](https://cluster-api-gcp.sigs.k8s.io/) - [Hetzner](https://github.com/syself/cluster-api-provider-hetzner) +- [Hivelocity](https://github.com/hivelocity/cluster-api-provider-hivelocity) - [IBM Cloud](https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud) - [KubeKey](https://github.com/kubesphere/kubekey) - [KubeVirt](https://github.com/kubernetes-sigs/cluster-api-provider-kubevirt) @@ -39,12 +48,20 @@ updated info about which API version they are supporting. 
- [Oracle Cloud Infrastructure (OCI)](https://github.com/oracle/cluster-api-provider-oci) - [OpenStack](https://github.com/kubernetes-sigs/cluster-api-provider-openstack) - [Outscale](https://github.com/outscale/cluster-api-provider-outscale) +- [Proxmox](https://github.com/ionos-cloud/cluster-api-provider-proxmox) - [Sidero](https://github.com/siderolabs/sidero) - [Tinkerbell](https://github.com/tinkerbell/cluster-api-provider-tinkerbell) - [vcluster](https://github.com/loft-sh/cluster-api-provider-vcluster) - [Virtink](https://github.com/smartxworks/cluster-api-provider-virtink) - [VMware Cloud Director](https://github.com/vmware/cluster-api-provider-cloud-director) - [vSphere](https://github.com/kubernetes-sigs/cluster-api-provider-vsphere) +- [k0smotron RemoteMachine (SSH)](https://github.com/k0sproject/k0smotron) + +## IP Address Management (IPAM) +- [In Cluster](https://github.com/kubernetes-sigs/cluster-api-ipam-provider-in-cluster) + +## Addon +- [Helm](https://github.com/kubernetes-sigs/cluster-api-addon-provider-helm/) ## API Adopters diff --git a/docs/book/src/reference/versions.md b/docs/book/src/reference/versions.md index 3d8496e4508f..b587c2d51708 100644 --- a/docs/book/src/reference/versions.md +++ b/docs/book/src/reference/versions.md @@ -20,20 +20,22 @@ A Cluster API minor release supports (when it's initially created): * 6 Kubernetes minor releases for the workload cluster (N - N-5) When a new Kubernetes minor release is available, we will try to support it in an upcoming Cluster API patch release -(although only in the latest supported Cluster API minor release). But this depends on the changes made in Kubernetes, if -the corresponding required changes in Cluster API are too invasive we won't backport the support and users have to wait -for the next Cluster API minor release. +(although only in the latest supported Cluster API minor release). See Cluster API [release cycle](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/release/release-cycle.md) +and [release calendars](https://github.com/kubernetes-sigs/cluster-api/tree/main/docs/release/releases) for more details. -For example, Cluster API v1.4.0 would support the following Kubernetes versions: -* v1.23.x to v1.26.x for the management cluster -* v1.21.x to v1.26.x for the workload cluster -* When Kubernetes 1.27 is released, it will be supported in v1.4.x (but not in v1.3.x) +For example, Cluster API v1.7.0 would support the following Kubernetes versions: +* v1.26.x to v1.29.x for the management cluster +* v1.24.x to v1.29.x for the workload cluster +* When Kubernetes 1.30 is released, it will be supported in v1.7.x (but not in v1.6.x) Support in this context means that we: * maintain corresponding code paths * have test coverage * accept bug fixes +Important! If the changes in Cluster API required to support a new Kubernetes release are too invasive, we won't backport +the support to older releases, and users will have to wait for the next Cluster API minor release. + Important! This is not a replacement/alternative for upstream Kubernetes support policies! Support for versions of Kubernetes which itself are out of support is limited to "Cluster API can start a Cluster with this Kubernetes version" and "Cluster API can upgrade to the next Kubernetes version"; it does not include any extended support to Kubernetes itself. @@ -55,8 +57,8 @@ The Core Provider, Kubeadm Bootstrap Provider, and Kubeadm Control Plane Provide In some cases, the Management Cluster is separate from the Workload Clusters.
The Kubernetes version of the Management and Workload Clusters are allowed to be different. Management Clusters and Workload Clusters can be upgraded independently and in any order, however, if you are additionally moving from -v1alpha3 (v0.3.x) to v1beta1 (v1.x) as part of the upgrade rollout, the management cluster will need to be upgraded to at least v1.20.x, -prior to upgrading any workload cluster using Cluster API v1beta1 (v1.x) +v1alpha3 (v0.3.x) or v1alpha4 (v0.4.x) to v1beta1 (v1.x) as part of the upgrade, prior to upgrading any workload cluster using Cluster API v1beta1, +the management cluster will need to be upgraded to at least the minimum supported Kubernetes version for your target CAPI version. These diagrams show the relationships between components in a Cluster API release (yellow), and other components (white). @@ -72,55 +74,53 @@ These diagrams show the relationships between components in a Cluster API releas #### Core Provider (`cluster-api-controller`) -| | v1.1 (v1beta1) (EOL) | v1.2 (v1beta1) | v1.3 (v1beta1) | v1.4 (v1beta1) | +| | v1.5 (v1beta1) (EOL) | v1.6 (v1beta1) | v1.7 (v1beta1) | v1.8 (v1beta1) | |-------------------|----------------------|-------------------|-------------------|-------------------| -| Kubernetes v1.18 | ✓ (only workload) | ✓ (only workload) | ✓ (only workload) | | -| Kubernetes v1.19 | ✓ | ✓ (only workload) | ✓ (only workload) | | -| Kubernetes v1.20 | ✓ | ✓ | ✓ | | -| Kubernetes v1.21 | ✓ | ✓ | ✓ | ✓ (only workload) | -| Kubernetes v1.22 | ✓ | ✓ | ✓ | ✓ (only workload) | -| Kubernetes v1.23* | ✓ | ✓ | ✓ | ✓ | -| Kubernetes v1.24 | ✓ | ✓ | ✓ | ✓ | -| Kubernetes v1.25 | | ✓ | ✓ | ✓ | -| Kubernetes v1.26 | | ✓ | ✓ | ✓ | +| Kubernetes v1.22 | ✓ (only workload) | | | | +| Kubernetes v1.23* | ✓ (only workload) | ✓ (only workload) | | | +| Kubernetes v1.24 | ✓ | ✓ (only workload) | ✓ (only workload) | | +| Kubernetes v1.25 | ✓ | ✓ | ✓ (only workload) | ✓ (only workload) | +| Kubernetes v1.26 | ✓ | ✓ | ✓ | ✓ (only workload) | +| Kubernetes v1.27 | ✓ | ✓ | ✓ | ✓ | +| Kubernetes v1.28 | ✓ >= v1.5.1 | ✓ | ✓ | ✓ | +| Kubernetes v1.29 | | ✓ >= v1.6.1 | ✓ | ✓ | +| Kubernetes v1.30 | | | ✓ >= v1.7.1 | ✓ | \* There is an issue with CRDs in Kubernetes v1.23.{0-2}. ClusterClass with patches is affected by that (for more details please see [this issue](https://github.com/kubernetes-sigs/cluster-api/issues/5990)). Therefore we recommend to use Kubernetes v1.23.3+ with ClusterClass. Previous Kubernetes **minor** versions are not affected. -\** When using CAPI v1.2 or v1.3 with the CLUSTER_TOPOLOGY experimental feature on, the Kubernetes Version for the management cluster must be >= 1.22.0. - The Core Provider also talks to API server of every Workload Cluster. Therefore, the Workload Cluster's Kubernetes version must also be compatible.
#### Kubeadm Bootstrap Provider (`kubeadm-bootstrap-controller`) -| | v1.1 (v1beta1) (EOL) | v1.2 (v1beta1) | v1.3 (v1beta1) | v1.4 (v1beta1) | -|------------------------------------|----------------------|-------------------|-------------------|--------------------| -| Kubernetes v1.18 + kubeadm/v1beta2 | ✓ (only workload) | ✓ (only workload) | ✓ (only workload) | | -| Kubernetes v1.19 + kubeadm/v1beta2 | ✓ | ✓ (only workload) | ✓ (only workload) | | -| Kubernetes v1.20 + kubeadm/v1beta2 | ✓ | ✓ | ✓ | | -| Kubernetes v1.21 + kubeadm/v1beta2 | ✓ | ✓ | ✓ | ✓ (only workload) | -| Kubernetes v1.22 + kubeadm/v1beta3 | ✓ | ✓ | ✓ | ✓ (only workload) | -| Kubernetes v1.23 + kubeadm/v1beta3 | ✓ | ✓ | ✓ | ✓ | -| Kubernetes v1.24 + kubeadm/v1beta3 | ✓ | ✓ | ✓ | ✓ | -| Kubernetes v1.25 + kubeadm/v1beta3 | | ✓ | ✓ | ✓ | -| Kubernetes v1.26 + kubeadm/v1beta3 | | ✓ | ✓ | ✓ | +| | v1.5 (v1beta1) (EOL) | v1.6 (v1beta1) | v1.7 (v1beta1) | v1.8 (v1beta1) | +|------------------------------------|----------------------|--------------------|--------------------|--------------------| +| Kubernetes v1.22 + kubeadm/v1beta3 | ✓ (only workload) | | | | +| Kubernetes v1.23 + kubeadm/v1beta3 | ✓ (only workload) | ✓ (only workload) | | | +| Kubernetes v1.24 + kubeadm/v1beta3 | ✓ | ✓ (only workload) | ✓ (only workload) | | +| Kubernetes v1.25 + kubeadm/v1beta3 | ✓ | ✓ | ✓ (only workload) | ✓ (only workload) | +| Kubernetes v1.26 + kubeadm/v1beta3 | ✓ | ✓ | ✓ | ✓ (only workload) | +| Kubernetes v1.27 + kubeadm/v1beta3 | ✓ | ✓ | ✓ | ✓ | +| Kubernetes v1.28 + kubeadm/v1beta3 | ✓ >= v1.5.1 | ✓ | ✓ | ✓ | +| Kubernetes v1.29 + kubeadm/v1beta3 | | ✓ >= v1.6.1 | ✓ | ✓ | +| Kubernetes v1.30 + kubeadm/v1beta3 | | | ✓ >= v1.7.1 | ✓ | The Kubeadm Bootstrap Provider generates kubeadm configuration using the API version recommended for the target Kubernetes version. #### Kubeadm Control Plane Provider (`kubeadm-control-plane-controller`) -| | v1.1 (v1beta1) (EOL) | v1.2 (v1beta1) | v1.3 (v1beta1) | v1.4 (v1beta1) | +| | v1.5 (v1beta1) (EOL) | v1.6 (v1beta1) | v1.7 (v1beta1) | v1.8 (v1beta1) | |----------------------------|----------------------|-------------------|-------------------|-------------------| -| Kubernetes v1.18 + etcd/v3 | ✓ (only workload) | ✓ (only workload) | ✓ (only workload) | | -| Kubernetes v1.19 + etcd/v3 | ✓ | ✓ (only workload) | ✓ (only workload) | | -| Kubernetes v1.20 + etcd/v3 | ✓ | ✓ | ✓ | | -| Kubernetes v1.21 + etcd/v3 | ✓ | ✓ | ✓ | ✓ (only workload) | -| Kubernetes v1.22 + etcd/v3 | ✓ | ✓ | ✓ | ✓ (only workload) | -| Kubernetes v1.23 + etcd/v3 | ✓ | ✓ | ✓ | ✓ | -| Kubernetes v1.24 + etcd/v3 | ✓ | ✓ | ✓ | ✓ | -| Kubernetes v1.25 + etcd/v3 | | ✓ | ✓ | ✓ | -| Kubernetes v1.26 + etcd/v3 | | ✓ | ✓ | ✓ | +| Kubernetes v1.22 + etcd/v3 | ✓ (only workload) | | | | +| Kubernetes v1.23 + etcd/v3 | ✓ (only workload) | ✓ (only workload) | | | +| Kubernetes v1.24 + etcd/v3 | ✓ | ✓ (only workload) | ✓ (only workload) | | +| Kubernetes v1.25 + etcd/v3 | ✓ | ✓ | ✓ (only workload) | ✓ (only workload) | +| Kubernetes v1.26 + etcd/v3 | ✓ | ✓ | ✓ | ✓ (only workload) | +| Kubernetes v1.27 + etcd/v3 | ✓ | ✓ | ✓ | ✓ | +| Kubernetes v1.28 + etcd/v3 | ✓ >= v1.5.1 | ✓ | ✓ | ✓ | +| Kubernetes v1.29 + etcd/v3 | | ✓ >= v1.6.1 | ✓ | ✓ | +| Kubernetes v1.30 + etcd/v3 | | | ✓ >= v1.7.1 | ✓ | The Kubeadm Control Plane Provider talks to the API server and etcd members of every Workload Cluster whose control plane it owns. It uses the etcd v3 API. @@ -132,21 +132,15 @@ The Kubeadm Control Plane requires the Kubeadm Bootstrap Provider. 
| CAPI Version | Max CoreDNS Version for Upgrade | |----------------------|---------------------------------| -| v1.1 (v1beta1) | v1.9.3 | -| v1.2 (v1beta1) | v1.9.3 | -| >= v1.2.7 (v1beta1) | v1.10.0 | -| >= v1.2.11 (v1beta1) | v1.10.1 | -| v1.3 (v1beta1) | v1.10.0 | -| >= v1.3.4 (v1beta1) | v1.10.1 | -| v1.4 (v1beta1) | v1.10.1 | +| v1.5 (v1beta1) | v1.10.1 | +| >= v1.5.1 (v1beta1) | v1.11.1 | +| v1.6 (v1beta1) | v1.11.1 | +| v1.7 (v1beta1) | v1.11.1 | #### Kubernetes version specific notes -**1.26**: -* No specific notes - -**1.25**: -* No specific notes +**1.29**: +* In-tree cloud providers are now switched off by default. Please use DisableCloudProviders and DisableKubeletCloudCredentialProvider feature flags if you still need this functionality. (https://github.com/kubernetes/kubernetes/pull/117503) **1.24**: * Kubeadm Bootstrap Provider: diff --git a/docs/book/src/roadmap.md b/docs/book/src/roadmap.md deleted file mode 100644 index 44310ebcd504..000000000000 --- a/docs/book/src/roadmap.md +++ /dev/null @@ -1,3 +0,0 @@ -# Cluster API Roadmap - -You can find the Cluster API roadmap discussion at [GitHub](https://github.com/kubernetes-sigs/cluster-api/discussions/5556). Please feel free to participate! diff --git a/docs/book/src/tasks/automated-machine-management/autoscaling.md b/docs/book/src/tasks/automated-machine-management/autoscaling.md index ca8dbca37e5e..442152b71865 100644 --- a/docs/book/src/tasks/automated-machine-management/autoscaling.md +++ b/docs/book/src/tasks/automated-machine-management/autoscaling.md @@ -12,17 +12,17 @@ from the [Autoscaler project documentation](https://github.com/kubernetes/autosc diff --git a/docs/book/src/tasks/automated-machine-management/healthchecking.md b/docs/book/src/tasks/automated-machine-management/healthchecking.md index 7706a71b0e7d..5b2afa8763e0 100644 --- a/docs/book/src/tasks/automated-machine-management/healthchecking.md +++ b/docs/book/src/tasks/automated-machine-management/healthchecking.md @@ -20,7 +20,7 @@ A MachineHealthCheck is a resource within the Cluster API which allows users to A MachineHealthCheck is defined on a management cluster and scoped to a particular workload cluster. When defining a MachineHealthCheck, users specify a timeout for each of the conditions that they define to check on the Machine's Node. -If any of these conditions are met for the duration of the timeout, the Machine will be remediated. +If any of these conditions are met for the duration of the timeout, the Machine will be remediated. Also, Machines with `failureReason` or `failureMessage` (terminal failures) are automatically remediated. By default, the action of remediating a Machine should trigger a new Machine to be created to replace the failed one, but providers are allowed to plug in more sophisticated external remediation solutions. ## Creating a MachineHealthCheck @@ -92,6 +92,57 @@ in order to prevent conflicts or unexpected behaviors when trying to remediate t + +## Controlling remediation retries + + + +KubeadmControlPlane allows you to control how remediation happens by defining an optional `remediationStrategy`; +this feature can be used to prevent unnecessary load on the infrastructure provider, e.g. in case of quota problems, or to allow the infrastructure provider to stabilize in case of temporary problems. + +```yaml +apiVersion: cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: my-control-plane +spec: + ...
+  remediationStrategy: +    maxRetry: 5 +    retryPeriod: 2m +    minHealthyPeriod: 2h +``` + +`maxRetry` is the maximum number of retries while attempting to remediate an unhealthy machine. +A retry happens when a machine that was created as a replacement for an unhealthy machine also fails. +For example, given a control plane with three machines M1, M2, M3: + +- M1 becomes unhealthy; remediation happens, and M1-1 is created as a replacement. +- If M1-1 (replacement of M1) has problems while bootstrapping, it will become unhealthy, and then be + remediated. This operation is considered a retry - remediation-retry #1. +- If M1-2 (replacement of M1-1) becomes unhealthy, remediation-retry #2 will happen, etc. + +A retry will only happen after the `retryPeriod` from the previous retry has elapsed. If `retryPeriod` is not set (default), a retry will happen immediately. + +If a machine is marked as unhealthy after `minHealthyPeriod` (default 1h) has passed since the previous remediation, this is no longer considered a retry because the new issue is assumed to be unrelated to the previous one. + +If `maxRetry` is not set (default), remediation will be retried infinitely. + + + ## Remediation Short-Circuiting To ensure that MachineHealthChecks only remediate Machines when the cluster is healthy, @@ -150,7 +201,7 @@ If both `maxUnhealthy` and `unhealthyRange` are specified, `unhealthyRange` take If `unhealthyRange` is set to `[3-5]` and there are 10 Machines being checked: - If 2 or fewer nodes are unhealthy, remediation will not be performed. -- If 5 or more nodes are unhealthy, remediation will not be performed. +- If 6 or more nodes are unhealthy, remediation will not be performed. - In all other cases, remediation will be performed. Note, the above example had 10 machines as sample set. But, this would work the same way for any other number. @@ -174,9 +225,19 @@ Before deploying a MachineHealthCheck, please familiarise yourself with the foll - Only Machines owned by a MachineSet or a KubeadmControlPlane can be remediated by a MachineHealthCheck (since a MachineDeployment uses a MachineSet, then this includes Machines that are part of a MachineDeployment) - Machines managed by a KubeadmControlPlane are remediated according to [the delete-and-recreate guidelines described in the KubeadmControlPlane proposal](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20191017-kubeadm-based-control-plane.md#remediation-using-delete-and-recreate) +  - The following rules should be satisfied in order to start remediation of a control plane machine: +    - One of the following applies: +      - The cluster MUST not be initialized yet (the failure happens before KCP reaches the initialized state) +      - The cluster MUST have at least two control plane machines, because this is the smallest cluster size that can be remediated. +    - Previous remediation (delete and re-create) MUST have been completed. This rule prevents KCP from remediating more machines while the replacement for the previous machine is not yet created. +    - The cluster MUST have no machines with a deletion timestamp. This rule prevents KCP from taking actions while the cluster is in a transitional state. +    - Remediation MUST preserve etcd quorum.
This rule ensures that we will not remove a member that would result in etcd losing a majority of members and thus becoming unable to field new requests (note: this rule applies only to a control plane that is already initialized and uses managed etcd) - If the Node for a Machine is removed from the cluster, a MachineHealthCheck will consider this Machine unhealthy and remediate it immediately - If no Node joins the cluster for a Machine after the `NodeStartupTimeout`, the Machine will be remediated - If a Machine fails for any reason (if the FailureReason is set), the Machine will be remediated immediately +- Important: if the kubelet on the node hosting the etcd leader member is not working, KCP cannot perform some checks it is expected to run specifically on the leader. +  This prevents remediation from happening. There are ongoing discussions about how to overcome this limitation in https://github.com/kubernetes-sigs/cluster-api/issues/8465; as of today, users facing this situation +  are advised to manually forward leadership to another etcd member and manually delete the corresponding machine. [management cluster]: ../../reference/glossary.md#management-cluster diff --git a/docs/book/src/tasks/bootstrap/kubeadm-bootstrap.md b/docs/book/src/tasks/bootstrap/kubeadm-bootstrap/index.md similarity index 93% rename from docs/book/src/tasks/bootstrap/kubeadm-bootstrap.md rename to docs/book/src/tasks/bootstrap/kubeadm-bootstrap/index.md index 82f2f3b47b86..71215b547e9d 100644 --- a/docs/book/src/tasks/bootstrap/kubeadm-bootstrap.md +++ b/docs/book/src/tasks/bootstrap/kubeadm-bootstrap/index.md @@ -42,13 +42,7 @@ metadata: name: my-control-plane1-config spec: initConfiguration: - nodeRegistration: - kubeletExtraArgs: - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% - clusterConfiguration: - controllerManager: - extraArgs: - enable-hostpath-provisioner: "true" + nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. --- kind: DockerMachine apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 @@ -119,12 +113,7 @@ metadata: spec: initConfiguration: nodeRegistration: - kubeletExtraArgs: - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% - clusterConfiguration: - controllerManager: - extraArgs: - enable-hostpath-provisioner: "true" + nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. ``` Additional control plane nodes: @@ -136,8 +125,7 @@ metadata: spec: joinConfiguration: nodeRegistration: - kubeletExtraArgs: - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. controlPlane: {} ``` @@ -150,8 +138,7 @@ metadata: spec: joinConfiguration: nodeRegistration: - kubeletExtraArgs: - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use.
``` ### Bootstrap Orchestration diff --git a/docs/book/src/tasks/bootstrap/kubeadm-bootstrap/kubelet-config.md b/docs/book/src/tasks/bootstrap/kubeadm-bootstrap/kubelet-config.md new file mode 100644 index 000000000000..085f373cc340 --- /dev/null +++ b/docs/book/src/tasks/bootstrap/kubeadm-bootstrap/kubelet-config.md @@ -0,0 +1,349 @@ +# Kubelet Configuration + +CAPBK has several ways to configure the kubelet. + +- [Kubelet Configuration](#kubelet-configuration) +  - [Pass `KubeletConfiguration` file via `KubeadmConfigSpec.files`](#pass-kubeletconfiguration-file-via-kubeadmconfigspecfiles) +    - [KubeadmControlPlaneTemplate](#kubeadmcontrolplanetemplate) +    - [KubeadmConfigTemplate](#kubeadmconfigtemplate) +  - [Set kubelet flags via `KubeadmConfigSpec.kubeletExtraArgs`](#set-kubelet-flags-via-kubeadmconfigspeckubeletextraargs) +    - [KubeadmControlPlaneTemplate](#kubeadmcontrolplanetemplate-1) +    - [KubeadmConfigTemplate](#kubeadmconfigtemplate-1) +  - [Use kubeadm's `kubeletconfiguration` patch target](#use-kubeadms-kubeletconfiguration-patch-target) +    - [KubeadmControlPlaneTemplate](#kubeadmcontrolplanetemplate-2) +    - [KubeadmConfigTemplate](#kubeadmconfigtemplate-2) + +## Pass `KubeletConfiguration` file via `KubeadmConfigSpec.files` + +You can use `KubeadmConfigSpec.files` to put any files on nodes. This example puts a `KubeletConfiguration` file on nodes via `KubeadmConfigSpec.files`, and makes kubelet use it via `KubeadmConfigSpec.kubeletExtraArgs`. You can check available configurations of `KubeletConfiguration` on [Kubelet Configuration (v1beta1) | Kubernetes](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration). + +This method makes it easy to replace the whole kubelet configuration generated by kubeadm, but it is not easy to replace only a part of the kubelet configuration. + +### KubeadmControlPlaneTemplate + +```yaml +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlaneTemplate +metadata: +  name: cloudinit-control-plane +  namespace: default +spec: +  template: +    spec: +      kubeadmConfigSpec: +        files: +        # We put a KubeletConfiguration file on nodes via KubeadmConfigSpec.files +        # In this example, we directly put the file content in the KubeadmConfigSpec.files.content field.
+        - path: /etc/kubernetes/kubelet/config.yaml +          owner: "root:root" +          permissions: "0644" +          content: | +            apiVersion: kubelet.config.k8s.io/v1beta1 +            kind: KubeletConfiguration +            kubeReserved: +              cpu: "1" +              memory: "2Gi" +              ephemeral-storage: "1Gi" +            systemReserved: +              cpu: "500m" +              memory: "1Gi" +              ephemeral-storage: "1Gi" +            evictionHard: +              memory.available: "500Mi" +              nodefs.available: "10%" +            authentication: +              anonymous: +                enabled: false +              webhook: +                cacheTTL: 0s +                enabled: true +              x509: +                clientCAFile: /etc/kubernetes/pki/ca.crt +            authorization: +              mode: Webhook +              webhook: +                cacheAuthorizedTTL: 0s +                cacheUnauthorizedTTL: 0s +            cgroupDriver: systemd +            clusterDNS: +            - 10.128.0.10 +            clusterDomain: cluster.local +            containerRuntimeEndpoint: "" +            cpuManagerReconcilePeriod: 0s +            evictionPressureTransitionPeriod: 0s +            fileCheckFrequency: 0s +            healthzBindAddress: 127.0.0.1 +            healthzPort: 10248 +            httpCheckFrequency: 0s +            imageMinimumGCAge: 0s +            logging: +              flushFrequency: 0 +              options: +                json: +                  infoBufferSize: "0" +              verbosity: 0 +            memorySwap: {} +            nodeStatusReportFrequency: 0s +            nodeStatusUpdateFrequency: 0s +            rotateCertificates: true +            runtimeRequestTimeout: 0s +            shutdownGracePeriod: 0s +            shutdownGracePeriodCriticalPods: 0s +            staticPodPath: /etc/kubernetes/manifests +            streamingConnectionIdleTimeout: 0s +            syncFrequency: 0s +            volumeStatsAggPeriod: 0s +        initConfiguration: +          nodeRegistration: +            criSocket: unix:///var/run/containerd/containerd.sock +            # Here we configure kubelet to use the KubeletConfiguration file we put on nodes via KubeadmConfigSpec.files +            kubeletExtraArgs: +              config: "/etc/kubernetes/kubelet/config.yaml" +        joinConfiguration: +          nodeRegistration: +            criSocket: unix:///var/run/containerd/containerd.sock +            # Here we configure kubelet to use the KubeletConfiguration file we put on nodes via KubeadmConfigSpec.files +            kubeletExtraArgs: +              config: "/etc/kubernetes/kubelet/config.yaml" +``` + +### KubeadmConfigTemplate + +```yaml +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: +  name: cloudinit-default-worker-bootstraptemplate +  namespace: default +spec: +  template: +    spec: +      files: +      # We put a KubeletConfiguration file on nodes via KubeadmConfigSpec.files +      # In this example, we directly put the file content in the KubeadmConfigSpec.files.content field.
+      - path: /etc/kubernetes/kubelet/config.yaml +        owner: "root:root" +        permissions: "0644" +        content: | +          apiVersion: kubelet.config.k8s.io/v1beta1 +          kind: KubeletConfiguration +          kubeReserved: +            cpu: "1" +            memory: "2Gi" +            ephemeral-storage: "1Gi" +          systemReserved: +            cpu: "500m" +            memory: "1Gi" +            ephemeral-storage: "1Gi" +          evictionHard: +            memory.available: "500Mi" +            nodefs.available: "10%" +          authentication: +            anonymous: +              enabled: false +            webhook: +              cacheTTL: 0s +              enabled: true +            x509: +              clientCAFile: /etc/kubernetes/pki/ca.crt +          authorization: +            mode: Webhook +            webhook: +              cacheAuthorizedTTL: 0s +              cacheUnauthorizedTTL: 0s +          cgroupDriver: systemd +          clusterDNS: +          - 10.128.0.10 +          clusterDomain: cluster.local +          containerRuntimeEndpoint: "" +          cpuManagerReconcilePeriod: 0s +          evictionPressureTransitionPeriod: 0s +          fileCheckFrequency: 0s +          healthzBindAddress: 127.0.0.1 +          healthzPort: 10248 +          httpCheckFrequency: 0s +          imageMinimumGCAge: 0s +          logging: +            flushFrequency: 0 +            options: +              json: +                infoBufferSize: "0" +            verbosity: 0 +          memorySwap: {} +          nodeStatusReportFrequency: 0s +          nodeStatusUpdateFrequency: 0s +          rotateCertificates: true +          runtimeRequestTimeout: 0s +          shutdownGracePeriod: 0s +          shutdownGracePeriodCriticalPods: 0s +          staticPodPath: /etc/kubernetes/manifests +          streamingConnectionIdleTimeout: 0s +          syncFrequency: 0s +          volumeStatsAggPeriod: 0s +      joinConfiguration: +        nodeRegistration: +          criSocket: unix:///var/run/containerd/containerd.sock +          # Here we configure kubelet to use the KubeletConfiguration file we put on nodes via KubeadmConfigSpec.files +          kubeletExtraArgs: +            config: "/etc/kubernetes/kubelet/config.yaml" +``` + +## Set kubelet flags via `KubeadmConfigSpec.kubeletExtraArgs` + +We can pass kubelet command-line flags via `KubeadmConfigSpec.kubeletExtraArgs`. This example is equivalent to setting `--kube-reserved`, `--system-reserved`, and `--eviction-hard` flags for the kubelet command. + +This method is useful when you want to set kubelet flags that are not configurable via the `KubeletConfiguration` file; however, it is not recommended to use this method to set flags that are configurable via the `KubeletConfiguration` file.
+ +### KubeadmControlPlaneTemplate + +```yaml +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlaneTemplate +metadata: +  name: kubelet-extra-args-control-plane +  namespace: default +spec: +  template: +    spec: +      kubeadmConfigSpec: +        initConfiguration: +          nodeRegistration: +            criSocket: unix:///var/run/containerd/containerd.sock +            # Set kubelet flags via KubeadmConfigSpec.kubeletExtraArgs +            kubeletExtraArgs: +              kube-reserved: cpu=1,memory=2Gi,ephemeral-storage=1Gi +              system-reserved: cpu=500m,memory=1Gi,ephemeral-storage=1Gi +              eviction-hard: memory.available<500Mi,nodefs.available<10% +        joinConfiguration: +          nodeRegistration: +            criSocket: unix:///var/run/containerd/containerd.sock +            # Set kubelet flags via KubeadmConfigSpec.kubeletExtraArgs +            kubeletExtraArgs: +              kube-reserved: cpu=1,memory=2Gi,ephemeral-storage=1Gi +              system-reserved: cpu=500m,memory=1Gi,ephemeral-storage=1Gi +              eviction-hard: memory.available<500Mi,nodefs.available<10% +``` + +### KubeadmConfigTemplate + +```yaml +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: +  name: kubelet-extra-args-default-worker-bootstraptemplate +  namespace: default +spec: +  template: +    spec: +      joinConfiguration: +        nodeRegistration: +          criSocket: unix:///var/run/containerd/containerd.sock +          # Set kubelet flags via KubeadmConfigSpec.kubeletExtraArgs +          kubeletExtraArgs: +            kube-reserved: cpu=1,memory=2Gi,ephemeral-storage=1Gi +            system-reserved: cpu=500m,memory=1Gi,ephemeral-storage=1Gi +            eviction-hard: memory.available<500Mi,nodefs.available<10% +``` + +## Use kubeadm's `kubeletconfiguration` patch target + +We can use kubeadm's `kubeletconfiguration` patch target to patch the kubelet configuration file. In this example, we put a patch file for the `kubeletconfiguration` target with the `strategic` `patchtype` on nodes via `KubeadmConfigSpec.files`. For more details, see [Customizing components with the kubeadm API | Kubernetes](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/#patches) + +This method is useful when you want to change the kubelet configuration file partially on specific nodes. For example, you can deploy a partially patched kubelet configuration file on specific nodes based on the default configuration used for `kubeadm init` or `kubeadm join`. + +### KubeadmControlPlaneTemplate + +```yaml +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlaneTemplate +metadata: +  name: kubeadm-config-template-control-plane +  namespace: default +spec: +  template: +    spec: +      kubeadmConfigSpec: +        files: +        # Here we put a patch file for kubeletconfiguration target in strategic patchtype on nodes via KubeadmConfigSpec.files +        # The naming convention of the patch file is kubeletconfiguration{suffix}+{patchtype}.json where {suffix} is a string and {patchtype} is one of the following: strategic, merge, json. +        # {suffix} determines the order of the patch files. The patches are applied in the alpha-numerical order of the {suffix}.
+        - path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.json +          owner: "root:root" +          permissions: "0644" +          content: | +            { +              "apiVersion": "kubelet.config.k8s.io/v1beta1", +              "kind": "KubeletConfiguration", +              "kubeReserved": { +                "cpu": "1", +                "memory": "2Gi", +                "ephemeral-storage": "1Gi" +              }, +              "systemReserved": { +                "cpu": "500m", +                "memory": "1Gi", +                "ephemeral-storage": "1Gi" +              }, +              "evictionHard": { +                "memory.available": "500Mi", +                "nodefs.available": "10%" +              } +            } +        initConfiguration: +          nodeRegistration: +            criSocket: unix:///var/run/containerd/containerd.sock +            # Here we specify the directory that contains the patch files +            patches: +              directory: /etc/kubernetes/patches +        joinConfiguration: +          nodeRegistration: +            criSocket: unix:///var/run/containerd/containerd.sock +            # Here we specify the directory that contains the patch files +            patches: +              directory: /etc/kubernetes/patches +``` + +### KubeadmConfigTemplate + +```yaml +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: +  name: kubeadm-config-template-default-worker-bootstraptemplate +  namespace: default +spec: +  template: +    spec: +      files: +      # Here we put a patch file for kubeletconfiguration target in strategic patchtype on nodes via KubeadmConfigSpec.files +      # The naming convention of the patch file is kubeletconfiguration{suffix}+{patchtype}.json where {suffix} is a string and {patchtype} is one of the following: strategic, merge, json. +      # {suffix} determines the order of the patch files. The patches are applied in the alpha-numerical order of the {suffix}. +      - path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.json +        owner: "root:root" +        permissions: "0644" +        content: | +          { +            "apiVersion": "kubelet.config.k8s.io/v1beta1", +            "kind": "KubeletConfiguration", +            "kubeReserved": { +              "cpu": "1", +              "memory": "2Gi", +              "ephemeral-storage": "1Gi" +            }, +            "systemReserved": { +              "cpu": "500m", +              "memory": "1Gi", +              "ephemeral-storage": "1Gi" +            }, +            "evictionHard": { +              "memory.available": "500Mi", +              "nodefs.available": "10%" +            } +          } +      joinConfiguration: +        nodeRegistration: +          criSocket: unix:///var/run/containerd/containerd.sock +          # Here we specify the directory that contains the patch files +          patches: +            directory: /etc/kubernetes/patches +``` \ No newline at end of file diff --git a/docs/book/src/tasks/bootstrap/microk8s-bootstrap.md b/docs/book/src/tasks/bootstrap/microk8s-bootstrap.md index 65734b5fedac..5dfc805039a7 100644 --- a/docs/book/src/tasks/bootstrap/microk8s-bootstrap.md +++ b/docs/book/src/tasks/bootstrap/microk8s-bootstrap.md @@ -85,7 +85,7 @@ Some of the configuration options available via `MicroK8sConfig` are: ### How does CABPM work? -The main purpose of the MicroK8s bootstrap provider is to translate the users needs to the a number of cloud-init files applicable for each type of cluster nodes. There are three types of cloud-inits: +The main purpose of the MicroK8s bootstrap provider is to translate the user's needs into a number of cloud-init files applicable to each type of cluster node. There are three types of cloud-inits: - The first node cloud-init. That node will be a control plane node and will be the one where the addons are enabled. - The control plane node cloud-init. The control plane nodes need to join a cluster and contribute to its HA.
diff --git a/docs/book/src/tasks/certs/using-custom-certificates.md b/docs/book/src/tasks/certs/using-custom-certificates.md index b89373cd162f..267ac83abdf9 100644 --- a/docs/book/src/tasks/certs/using-custom-certificates.md +++ b/docs/book/src/tasks/certs/using-custom-certificates.md @@ -11,6 +11,7 @@ Each certificate must be stored in a single secret named one of: | *[cluster name]***-proxy** | CA | openssl req -x509 -subj "/CN=Front-End Proxy" -new -newkey rsa:2048 -nodes -keyout tls.key -sha256 -days 3650 -out tls.crt | | *[cluster name]***-sa** | Key Pair | openssl genrsa -out tls.key 2048 && openssl rsa -in tls.key -pubout -out tls.crt | +The certificates *must* also be labeled with the key-value pair `cluster.x-k8s.io/cluster-name=[cluster name]` (where `[cluster name]` is the name of the cluster it should be used with). +## ClusterClass with MachinePools + +ClusterClass also supports MachinePool workers. They work very similarly to MachineDeployments. MachinePools +can be specified in the ClusterClass template under the workers section like so: + +```yaml +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: +  name: docker-clusterclass-v0.1.0 +spec: +  workers: +    machinePools: +    - class: default-worker +      template: +        bootstrap: +          ref: +            apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +            kind: KubeadmConfigTemplate +            name: quick-start-default-worker-bootstraptemplate +        infrastructure: +          ref: +            apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +            kind: DockerMachinePoolTemplate +            name: quick-start-default-worker-machinepooltemplate +``` + +They can then be similarly defined as workers in the cluster template like so: + +```yaml +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: +  name: my-docker-cluster +spec: +  topology: +    workers: +      machinePools: +      - class: default-worker +        name: mp-0 +        replicas: 4 +        metadata: +          labels: +            mpLabel: mpLabelValue +          annotations: +            mpAnnotation: mpAnnotationValue +        failureDomain: region +``` + ## ClusterClass with MachineHealthChecks `MachineHealthChecks` can be configured in the ClusterClass for the control plane and for a @@ -289,15 +341,112 @@ a default value, the value is automatically added to the variables list. + + +## ClusterClass with custom naming strategies + +The controller needs to generate names for new objects when a Cluster is created +from a ClusterClass. These names have to be unique within each namespace. The naming +strategy enables this by concatenating the cluster name with a random suffix. + +It is possible to provide a custom template for the name generation of ControlPlane, MachineDeployment +and MachinePool objects. + +The generated names must comply with the [RFC 1123](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names) standard. + +### Defining a custom naming strategy for ControlPlane objects + +The naming strategy for ControlPlane supports the following properties: + +- `template`: Custom template which is used when generating the name of the ControlPlane object. + +The following variables can be referenced in templates: + +- `.cluster.name`: The name of the cluster object. +- `.random`: A random alphanumeric string, without vowels, of length 5. + +Example which would match the default behavior: + +```yaml +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: +  name: docker-clusterclass-v0.1.0 +spec: +  controlPlane: +    ... +    namingStrategy: +      template: "{{ .cluster.name }}-{{ .random }}" +  ...
+``` + +### Defining a custom naming strategy for MachineDeployment objects + +The naming strategy for MachineDeployments supports the following properties: + +- `template`: Custom template which is used when generating the name of the MachineDeployment object. + +The following variables can be referenced in templates: + +- `.cluster.name`: The name of the cluster object. +- `.random`: A random alphanumeric string, without vowels, of length 5. +- `.machineDeployment.topologyName`: The name of the MachineDeployment topology (`Cluster.spec.topology.workers.machineDeployments[].name`). + +Example which would match the default behavior: + +```yaml +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: +  name: docker-clusterclass-v0.1.0 +spec: +  controlPlane: +    ... +  workers: +    machineDeployments: +    - class: default-worker +      ... +      namingStrategy: +        template: "{{ .cluster.name }}-{{ .machineDeployment.topologyName }}-{{ .random }}" +``` + +### Defining a custom naming strategy for MachinePool objects + +The naming strategy for MachinePools supports the following properties: + +- `template`: Custom template which is used when generating the name of the MachinePool object. + +The following variables can be referenced in templates: + +- `.cluster.name`: The name of the cluster object. +- `.random`: A random alphanumeric string, without vowels, of length 5. +- `.machinePool.topologyName`: The name of the MachinePool topology (`Cluster.spec.topology.workers.machinePools[].name`). + +Example which would match the default behavior: + +```yaml +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: +  name: docker-clusterclass-v0.1.0 +spec: +  controlPlane: +    ... +  workers: +    machinePools: +    - class: default-worker +      ... +      namingStrategy: +        template: "{{ .cluster.name }}-{{ .machinePool.topologyName }}-{{ .random }}" +``` + ## Advanced features of ClusterClass with patches This section will explain more advanced features of ClusterClass patches. -### MachineDeployment variable overrides +### MachineDeployment & MachinePool variable overrides If you want to use many variations of MachineDeployments in Clusters, you can either define a MachineDeployment class for every variation or you can define patches and variables to -make a single MachineDeployment class more flexible. +make a single MachineDeployment class more flexible. The same applies to MachinePools. In the following example we make the `instanceType` of a `AWSMachineTemplate` customizable. First we define the `workerMachineType` variable and the corresponding patch: @@ -346,7 +495,7 @@ spec: ``` In the Cluster resource the `workerMachineType` variable can then be set cluster-wide and -it can also be overridden for an individual MachineDeployment. +it can also be overridden for an individual MachineDeployment or MachinePool. ```yaml apiVersion: cluster.x-k8s.io/v1beta1 @@ -386,6 +535,7 @@ referenced in patches: - `builtin.cluster.{name,namespace}` - `builtin.cluster.topology.{version,class}` - `builtin.cluster.network.{serviceDomain,services,pods,ipFamily}` +  - Note: ipFamily is deprecated and will be removed in a future release. See https://github.com/kubernetes-sigs/cluster-api/issues/7521. - `builtin.controlPlane.{replicas,version,name}` - Please note, these variables are only available when patching control plane or control plane machine templates.
@@ -398,6 +548,12 @@ referenced in patches: - `builtin.machineDeployment.{infrastructureRef.name,bootstrap.configRef.name}` - Please note, these variables are only available when patching the templates of a MachineDeployment and contain the values of the current `MachineDeployment` topology. +- `builtin.machinePool.{replicas,version,class,name,topologyName}` +  - Please note, these variables are only available when patching the templates of a MachinePool +    and contain the values of the current `MachinePool` topology. +- `builtin.machinePool.{infrastructureRef.name,bootstrap.configRef.name}` +  - Please note, these variables are only available when patching the templates of a MachinePool +    and contain the values of the current `MachinePool` topology. Builtin variables can be referenced just like regular variables, e.g.: ```yaml @@ -422,8 +578,8 @@ spec: **Tips & Tricks** Builtin variables can be used to dynamically calculate image names. The version used in the patch -will always be the same as the one we set in the corresponding MachineDeployment (works the same way -with `.builtin.controlPlane.version`). +will always be the same as the one we set in the corresponding MachineDeployment or MachinePool +(works the same way with `.builtin.controlPlane.version`). ```yaml apiVersion: cluster.x-k8s.io/v1beta1 @@ -713,6 +869,9 @@ accessible via built in variables: - `builtin.machineDeployment.version`, represent the desired version for each specific MachineDeployment object; this version changes only after the upgrade for the control plane is completed, and in case of many MachineDeployments in the same cluster, they are upgraded sequentially. +- `builtin.machinePool.version`, represents the desired version for each specific MachinePool object; +  this version changes only after the upgrade for the control plane is completed, and in case of many +  MachinePools in the same cluster, they are upgraded sequentially. This info should provide the bases for developing version-aware patches, allowing the patch author to determine when a patch should adapt to the new Kubernetes version by choosing one of the above variables. In practice the @@ -720,6 +879,7 @@ following rules applies to the most common use cases: - When developing a version-aware patch for the control plane, `builtin.controlPlane.version` must be used. - When developing a version-aware patch for MachineDeployments, `builtin.machineDeployment.version` must be used. +- When developing a version-aware patch for MachinePools, `builtin.machinePool.version` must be used. **Tips & Tricks**: diff --git a/docs/book/src/tasks/experimental-features/cluster-resource-set.md b/docs/book/src/tasks/experimental-features/cluster-resource-set.md index ec73215676ba..8a666e73fd6f 100644 --- a/docs/book/src/tasks/experimental-features/cluster-resource-set.md +++ b/docs/book/src/tasks/experimental-features/cluster-resource-set.md @@ -1,4 +1,4 @@ -# Experimental Feature: ClusterResourceSet (alpha) +# Experimental Feature: ClusterResourceSet (beta) The `ClusterResourceSet` feature is introduced to provide a way to automatically apply a set of resources (such as CNI/CSI) defined by users to matching newly-created/existing clusters.
@@ -6,9 +6,46 @@ The `ClusterResourceSet` feature is introduced to provide a way to automatically
 
 **Variable name to enable/disable the feature gate**: `EXP_CLUSTER_RESOURCE_SET`
 
-More details on `ClusterResourceSet` and an example to test it can be found at:
+The `ClusterResourceSet` feature is enabled by default, but can be disabled by setting the `EXP_CLUSTER_RESOURCE_SET` environment variable to `false`.
+
+More details on `ClusterResourceSet` can be found at:
 [ClusterResourceSet CAEP](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20200220-cluster-resource-set.md)
 
+## Example
+
+Suppose you want to automatically install the relevant external cloud provider on all workload clusters.
+This can be accomplished by labeling the clusters with the specific cloud (e.g. AWS, GCP or OpenStack) and then creating a `ClusterResourceSet` for each.
+For example, you could have the following for OpenStack:
+
+```yaml
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+  name: cloud-provider-openstack
+  namespace: default
+spec:
+  strategy: Reconcile
+  clusterSelector:
+    matchLabels:
+      cloud: openstack
+  resources:
+  - name: cloud-provider-openstack
+    kind: ConfigMap
+  - name: cloud-config
+    kind: Secret
+```
+
+This `ClusterResourceSet` would apply the content of the `Secret` `cloud-config` and of the `ConfigMap` `cloud-provider-openstack` to all workload clusters with the label `cloud=openstack`.
+Suppose you have the file `cloud.conf` that should be included in the `Secret` and `cloud-provider-openstack.yaml` that should be in the `ConfigMap`.
+The `Secret` and `ConfigMap` can then be created in the following way:
+
+```bash
+kubectl create secret generic cloud-config --from-file=cloud.conf --type=addons.cluster.x-k8s.io/resource-set
+kubectl create configmap cloud-provider-openstack --from-file=cloud-provider-openstack.yaml
+```
+
+Note that the `Secret` must have the type `addons.cluster.x-k8s.io/resource-set` in order to be picked up.
+
 ## Update from `ApplyOnce` to `Reconcile`
 
 The `strategy` field is immutable so existing CRS can't be updated directly. However, CAPI won't delete the managed resources in the target cluster when the CRS is deleted.
diff --git a/docs/book/src/tasks/experimental-features/experimental-features.md b/docs/book/src/tasks/experimental-features/experimental-features.md
index 3564980e34ff..ced97b4d2a33 100644
--- a/docs/book/src/tasks/experimental-features/experimental-features.md
+++ b/docs/book/src/tasks/experimental-features/experimental-features.md
@@ -8,15 +8,15 @@ temporary location for features which will be moved to their permanent locations
 Users can enable/disable features by setting OS environment variables before running `clusterctl init`, e.g.:
 
 ```yaml
-export EXP_CLUSTER_RESOURCE_SET=true
+export EXP_SOME_FEATURE_NAME=true
 
 clusterctl init --infrastructure vsphere
 ```
 
-As an alternative to environment variables, it is also possible to set variables in the clusterctl config file located at `$HOME/.cluster-api/clusterctl.yaml`, e.g.:
+As an alternative to environment variables, it is also possible to set variables in the clusterctl config file located at `$XDG_CONFIG_HOME/cluster-api/clusterctl.yaml`, e.g.:
 
 ```yaml
 # Values for environment variable substitution
-EXP_CLUSTER_RESOURCE_SET: "true"
+EXP_SOME_FEATURE_NAME: "true"
 ```
 
 In case a variable is defined in both the config file and as an OS environment variable, the environment variable takes precedence.
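+
+For example, if the config file disables a feature, exporting the corresponding environment variable still wins. A minimal sketch, reusing the `EXP_SOME_FEATURE_NAME` placeholder from above (the provider choice is just an example):
+
+```bash
+# $XDG_CONFIG_HOME/cluster-api/clusterctl.yaml contains:
+#   EXP_SOME_FEATURE_NAME: "false"
+
+# The environment variable takes precedence, so the feature ends up enabled.
+export EXP_SOME_FEATURE_NAME=true
+clusterctl init --infrastructure vsphere
+```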
For more information on how to set variables for clusterctl, see [clusterctl Configuration File](../../clusterctl/configuration.md)
@@ -30,10 +30,9 @@ As an example, Cluster API Provider Azure (CAPZ) has support for MachinePool thr
 One way is to set experimental variables in the clusterctl config file. For CAPI, these configs are under ./test/e2e/config/... such as `docker.yaml`:
 ```yaml
 variables:
-  EXP_CLUSTER_RESOURCE_SET: "true"
-  EXP_MACHINE_POOL: "true"
   CLUSTER_TOPOLOGY: "true"
   EXP_RUNTIME_SDK: "true"
+  EXP_MACHINE_SET_PREFLIGHT_CHECKS: "true"
 ```
 
 Another way is to set them as environment variables before running e2e tests.
@@ -44,34 +43,57 @@ On development environments started with `Tilt`, features can be enabled by sett
 ```yaml
 kustomize_substitutions:
-  EXP_CLUSTER_RESOURCE_SET: 'true'
-  EXP_MACHINE_POOL: 'true'
   CLUSTER_TOPOLOGY: 'true'
   EXP_RUNTIME_SDK: 'true'
+  EXP_MACHINE_SET_PREFLIGHT_CHECKS: 'true'
 ```
 
 For more details on setting up a development environment with `tilt`, see [Developing Cluster API with Tilt](../../developer/tilt.md)
 
 ## Enabling Experimental Features on Existing Management Clusters
 
-To enable/disable features on existing management clusters, users can modify CAPI controller manager deployment which will restart all controllers with requested features.
+To enable/disable features on existing management clusters, users can edit the corresponding controller manager
+deployments, which will then trigger a restart with the requested features. E.g. for the CAPI controller manager
+deployment:
 
 ```
 kubectl edit -n capi-system deployment.apps/capi-controller-manager
 ```
 ```
-// Enable/disable available feautures by modifying Args below.
+// Enable/disable available features by modifying Args below.
 Args:
   --leader-elect
   --feature-gates=MachinePool=true,ClusterResourceSet=true
 ```
 
-Similarly, to **validate** if a particular feature is enabled, see cluster-api-provider deployment arguments by:
+Similarly, to **validate** whether a particular feature is enabled, inspect the arguments by issuing:
 
 ```bash
 kubectl describe -n capi-system deployment.apps/capi-controller-manager
 ```
 
+The following controller manager deployments have to be edited in order to enable/disable their respective experimental features:
+
+* [MachinePools](./machine-pools.md):
+  * [CAPI](https://cluster-api.sigs.k8s.io/reference/glossary.html?highlight=Gloss#capi).
+  * [CABPK](https://cluster-api.sigs.k8s.io/reference/glossary.html?highlight=Gloss#cabpk).
+  * [CAPD](https://cluster-api.sigs.k8s.io/reference/glossary.html?highlight=Providers#capd). Other [Infrastructure Providers](https://cluster-api.sigs.k8s.io/reference/glossary.html?highlight=Providers#infrastructure-provider)
+    might also require this. Please consult the docs of the concrete [Infrastructure Provider](https://cluster-api.sigs.k8s.io/reference/providers#infrastructure)
+    regarding this.
+* [ClusterResourceSet](./cluster-resource-set.md):
+  * [CAPI](https://cluster-api.sigs.k8s.io/reference/glossary.html?highlight=Gloss#capi).
+* [ClusterClass](./cluster-class/index.md):
+  * [CAPI](https://cluster-api.sigs.k8s.io/reference/glossary.html?highlight=Gloss#capi).
+  * [KCP](https://cluster-api.sigs.k8s.io/reference/glossary.html?highlight=Gloss#kcp).
+  * [CAPD](https://cluster-api.sigs.k8s.io/reference/glossary.html?highlight=Providers#capd). Other [Infrastructure Providers](https://cluster-api.sigs.k8s.io/reference/glossary.html?highlight=Providers#infrastructure-provider)
+    might also require this.
Please consult the docs of the concrete [Infrastructure Provider](https://cluster-api.sigs.k8s.io/reference/providers#infrastructure) + regarding this. +* [Ignition Bootstrap configuration](./ignition.md): + * [CABPK](https://cluster-api.sigs.k8s.io/reference/glossary.html?highlight=Gloss#cabpk). + * [KCP](https://cluster-api.sigs.k8s.io/reference/glossary.html?highlight=Gloss#kcp). +* [Runtime SDK](runtime-sdk/index.md): + * [CAPI](https://cluster-api.sigs.k8s.io/reference/glossary.html?highlight=Gloss#capi). + ## Active Experimental Features * [MachinePools](./machine-pools.md) diff --git a/docs/book/src/tasks/experimental-features/ignition.md b/docs/book/src/tasks/experimental-features/ignition.md index 29f86b7687ac..3c5ce518e106 100644 --- a/docs/book/src/tasks/experimental-features/ignition.md +++ b/docs/book/src/tasks/experimental-features/ignition.md @@ -10,6 +10,18 @@ This initial implementation uses Ignition **v2** and was tested with **Flatcar C + + This guide explains how to deploy an AWS workload cluster using Ignition. ## Prerequisites @@ -48,7 +60,7 @@ export AWS_B64ENCODED_CREDENTIALS=$(clusterawsadm bootstrap credentials encode-a # Enable the feature gates controlling Ignition bootstrap. export EXP_KUBEADM_BOOTSTRAP_FORMAT_IGNITION=true # Used by the kubeadm bootstrap provider -export BOOTSTRAP_FORMAT_IGNITION=true # Used by the AWS provider +export EXP_BOOTSTRAP_FORMAT_IGNITION=true # Used by the AWS provider # Initialize the management cluster. clusterctl init --infrastructure aws @@ -71,14 +83,15 @@ export AWS_S3_BUCKET_NAME=my-bucket export AWS_CONTROL_PLANE_MACHINE_TYPE=t3a.small export AWS_NODE_MACHINE_TYPE=t3a.small -# TODO: Update --from URL once https://github.com/kubernetes-sigs/cluster-api-provider-aws/pull/2271 is merged. clusterctl generate cluster ignition-cluster \ - --from https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/e7c89c9add92a4b233b26a1712518d9616d99e7a/templates/cluster-template-flatcar.yaml \ - --kubernetes-version v1.22.2 \ + --from https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/main/templates/cluster-template-flatcar.yaml \ + --kubernetes-version v1.28.0 \ --worker-machine-count 2 \ > ignition-cluster.yaml ``` +NOTE: Only certain Kubernetes versions have pre-built Kubernetes AMIs. See [list](https://cluster-api-aws.sigs.k8s.io/topics/images/built-amis) of published pre-built Kubernetes AMIs. + ## Apply the workload cluster ```bash diff --git a/docs/book/src/tasks/experimental-features/machine-pools.md b/docs/book/src/tasks/experimental-features/machine-pools.md index ddd28f7d341d..84c15bfc21ec 100644 --- a/docs/book/src/tasks/experimental-features/machine-pools.md +++ b/docs/book/src/tasks/experimental-features/machine-pools.md @@ -1,15 +1,4 @@ -# Experimental Feature: MachinePool (alpha) - - - +# Experimental Feature: MachinePool (beta) The `MachinePool` feature provides a way to manage a set of machines by defining a common configuration, number of desired machine replicas etc. 
similar to `MachineDeployment`, except `MachineSet` controllers are responsible for the lifecycle management of the machines for `MachineDeployment`, whereas in `MachinePools`, @@ -28,7 +17,7 @@ For developer docs on the MachinePool controller, see [here](./../../developer/a ## MachinePools vs MachineDeployments -Although MachinePools provide a similar feature to MachineDeployments, MachinePools do so by leveraging an InfraMachinePool which corresponds 1:1 with a resource like VMSS on Azure or Autoscaling Groups on AWS which we treat as a black box. When a MachinePool is scaled up, the InfraMachinePool scales itself up and populates its provider ID list based on the response from the infrastructure provider. On the other hand, a when a MachineDeployment is scaled up, new Machines are created which then create an individual InfraMachine, which corresponds to a VM in any infrastructure provider. +Although MachinePools provide a similar feature to MachineDeployments, MachinePools do so by leveraging an InfraMachinePool which corresponds 1:1 with a resource like VMSS on Azure or Autoscaling Groups on AWS which we treat as a black box. When a MachinePool is scaled up, the InfraMachinePool scales itself up and populates its provider ID list based on the response from the infrastructure provider. On the other hand, when a MachineDeployment is scaled up, new Machines are created which then create an individual InfraMachine, which corresponds to a VM in any infrastructure provider. | MachinePools | MachineDeployments | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | @@ -36,4 +25,4 @@ Although MachinePools provide a similar feature to MachineDeployments, MachinePo | Set of instances is orchestrated by the infrastructure provider. | Set of instances is orchestrated by Cluster API using a MachineSet. | | Each MachinePool corresponds 1:1 with an associated InfraMachinePool. | Each MachineDeployment includes a MachineSet, and for each replica, it creates a Machine and InfraMachine. | | Each MachinePool requires only a single BootstrapConfig. | Each MachineDeployment uses an InfraMachineTemplate and a BootstrapConfigTemplate, and each Machine requires a unique BootstrapConfig. | -| Maintains a list of instances in the `providerIDList` field in the MachinePool spec. This list is populated based on the response from the infrastructure provider. | Maintains a list of instances through the Machine resources owned by the MachineSet. | \ No newline at end of file +| Maintains a list of instances in the `providerIDList` field in the MachinePool spec. This list is populated based on the response from the infrastructure provider. | Maintains a list of instances through the Machine resources owned by the MachineSet. | diff --git a/docs/book/src/tasks/experimental-features/machineset-preflight-checks.md b/docs/book/src/tasks/experimental-features/machineset-preflight-checks.md new file mode 100644 index 000000000000..ad003dc5bd10 --- /dev/null +++ b/docs/book/src/tasks/experimental-features/machineset-preflight-checks.md @@ -0,0 +1,63 @@ +# Experimental Feature: MachineSetPreflightChecks (alpha) + +The `MachineSetPreflightChecks` feature can provide additional safety while creating new Machines and remediating existing unhealthy Machines of a MachineSet. 
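+
+Before diving into the details below, here is a minimal sketch of enabling the feature gate via the environment
+variable documented in this page (assuming the standard `clusterctl` workflow; the Docker provider is just an example):
+
+```bash
+# Enable the MachineSetPreflightChecks feature gate for the core controllers.
+export EXP_MACHINE_SET_PREFLIGHT_CHECKS=true
+clusterctl init --infrastructure docker
+```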
+
+When a MachineSet creates Machines under certain circumstances, the operation can fail or can lead to a new Machine that will be deleted and recreated within a short timeframe,
+causing unwanted Machine churn. Such circumstances include, but are not limited to: creating a new Machine when the Kubernetes version skew would be violated, or
+joining a Machine while the Control Plane is upgrading, which can fail because of mixed kube-apiserver versions or because the cluster load balancer is slow to adapt
+to the changes.
+
+Enabling `MachineSetPreflightChecks` provides safety in such circumstances by making sure that a Machine is only created when it is safe to do so.
+
+
+**Feature gate name**: `MachineSetPreflightChecks`
+
+**Variable name to enable/disable the feature gate**: `EXP_MACHINE_SET_PREFLIGHT_CHECKS`
+
+## Supported PreflightChecks
+
+### `ControlPlaneIsStable`
+
+* This preflight check ensures that the ControlPlane is currently stable, i.e. the ControlPlane is currently neither provisioning nor upgrading.
+* This preflight check is only performed if:
+  * The Cluster uses a ControlPlane provider.
+  * ControlPlane version is defined (`ControlPlane.spec.version` is set).
+
+### `KubernetesVersionSkew`
+
+* This preflight check ensures that the MachineSet and the ControlPlane conform to the [Kubernetes version skew](https://kubernetes.io/releases/version-skew-policy/#kubelet).
+* This preflight check is only performed if:
+  * The Cluster uses a ControlPlane provider.
+  * ControlPlane version is defined (`ControlPlane.spec.version` is set).
+  * MachineSet version is defined (`MachineSet.spec.template.spec.version` is set).
+
+### `KubeadmVersionSkew`
+
+* This preflight check ensures that the MachineSet and the ControlPlane conform to the [kubeadm version skew](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#kubeadm-s-skew-against-kubeadm).
+* This preflight check is only performed if:
+  * The Cluster uses a ControlPlane provider.
+  * ControlPlane version is defined (`ControlPlane.spec.version` is set).
+  * MachineSet version is defined (`MachineSet.spec.template.spec.version` is set).
+  * MachineSet uses the `Kubeadm` Bootstrap provider.
+
+## Opting out of PreflightChecks
+
+Once the feature gate is enabled, the preflight checks are enabled for all MachineSets, including both new and existing MachineSets.
+It is possible to opt out of one or all of the preflight checks on a per-MachineSet basis by specifying a comma-separated list of the preflight checks in the
+`machineset.cluster.x-k8s.io/skip-preflight-checks` annotation on the MachineSet.
+
+Examples:
+* To opt out of all the preflight checks, set the `machineset.cluster.x-k8s.io/skip-preflight-checks: All` annotation.
+* To opt out of the `ControlPlaneIsStable` preflight check, set the `machineset.cluster.x-k8s.io/skip-preflight-checks: ControlPlaneIsStable` annotation.
+* To opt out of multiple preflight checks, set the `machineset.cluster.x-k8s.io/skip-preflight-checks: ControlPlaneIsStable,KubernetesVersionSkew` annotation.
+
+
+
diff --git a/docs/book/src/tasks/experimental-features/runtime-sdk/deploy-runtime-extension.md b/docs/book/src/tasks/experimental-features/runtime-sdk/deploy-runtime-extension.md
index 6157f3176cb8..858445f14bce 100644
--- a/docs/book/src/tasks/experimental-features/runtime-sdk/deploy-runtime-extension.md
+++ b/docs/book/src/tasks/experimental-features/runtime-sdk/deploy-runtime-extension.md
@@ -15,6 +15,7 @@ controllers.
The recommended deployment model is to deploy a Runtime Extension i
 - Using a Kubernetes Deployment to run the above container inside the Management Cluster.
 - Using a Cluster IP Service to make the Runtime Extension instances accessible via a stable DNS name.
 - Using a cert-manager generated Certificate to protect the endpoint.
+- Registering the Runtime Extension using an ExtensionConfig.
 
 For an example, please see our [test extension](https://github.com/kubernetes-sigs/cluster-api/tree/main/test/extension)
 which follows, as closely as possible, the kubebuilder setup used for controllers in Cluster API.
diff --git a/docs/book/src/tasks/experimental-features/runtime-sdk/implement-extensions.md b/docs/book/src/tasks/experimental-features/runtime-sdk/implement-extensions.md
index 8d6fe937ee88..1265600970c6 100644
--- a/docs/book/src/tasks/experimental-features/runtime-sdk/implement-extensions.md
+++ b/docs/book/src/tasks/experimental-features/runtime-sdk/implement-extensions.md
@@ -84,22 +84,27 @@ func InitFlags(fs *pflag.FlagSet) {
 		"Webhook Server port")
 
 	fs.StringVar(&webhookCertDir, "webhook-cert-dir", "/tmp/k8s-webhook-server/serving-certs/",
-		"Webhook cert dir, only used when webhook-port is specified.")
+		"Webhook cert dir.")
 }
 
 func main() {
 	// Creates a logger to be used during the main func.
-	setupLog := ctrl.Log.WithName("main")
+	setupLog := ctrl.Log.WithName("setup")
 
 	// Initialize and parse command line flags.
 	InitFlags(pflag.CommandLine)
 	pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
 	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
+	// Set log level 2 as default.
+	if err := pflag.CommandLine.Set("v", "2"); err != nil {
+		setupLog.Error(err, "Failed to set default log level")
+		os.Exit(1)
+	}
 	pflag.Parse()
 
 	// Validates logs flags using Kubernetes component-base machinery and applies them
 	if err := logsv1.ValidateAndApply(logOptions, nil); err != nil {
-		setupLog.Error(err, "unable to start extension")
+		setupLog.Error(err, "Unable to start extension")
 		os.Exit(1)
 	}
 
@@ -121,7 +126,7 @@ func main() {
 		CertDir: webhookCertDir,
 	})
 	if err != nil {
-		setupLog.Error(err, "error creating webhook server")
+		setupLog.Error(err, "Error creating webhook server")
 		os.Exit(1)
 	}
 
@@ -131,7 +136,7 @@ func main() {
 		Name:        "before-cluster-create",
 		HandlerFunc: DoBeforeClusterCreate,
 	}); err != nil {
-		setupLog.Error(err, "error adding handler")
+		setupLog.Error(err, "Error adding handler")
 		os.Exit(1)
 	}
 
 	if err := webhookServer.AddExtensionHandler(server.ExtensionHandler{
@@ -139,7 +144,7 @@
 		Name:        "before-cluster-upgrade",
 		HandlerFunc: DoBeforeClusterUpgrade,
 	}); err != nil {
-		setupLog.Error(err, "error adding handler")
+		setupLog.Error(err, "Error adding handler")
 		os.Exit(1)
 	}
 
@@ -149,7 +154,7 @@
 	// Start the https server.
 	setupLog.Info("Starting Runtime Extension server")
 	if err := webhookServer.Start(ctx); err != nil {
-		setupLog.Error(err, "error running webhook server")
+		setupLog.Error(err, "Error running webhook server")
 		os.Exit(1)
 	}
 }
@@ -274,7 +279,7 @@ well with practices like unit testing and generally makes the entire system more
 
 ### Error messages
 
-RuntimeExtension authors should be aware that error messages are surfaced as a conditions in Kubernetes resources
+RuntimeExtension authors should be aware that error messages are surfaced as conditions in Kubernetes resources
 and recorded in Cluster API controller's logs. As a consequence:
 
 - Error messages must not contain any sensitive information.
@@ -286,16 +291,44 @@ and recorded in Cluster API controller's logs. As a consequence:

Caution

If an error message is not deterministic and it changes at every call even if the problem is the same, it could
-lead to to Kubernetes resources conditions continuously changing, and this generates a denial attack to 
+lead to Kubernetes resource conditions continuously changing, and this generates a denial-of-service attack on
 controllers processing those resources, which might impact system stability.
 
+### ExtensionConfig
+
+To register your runtime extension, apply the ExtensionConfig resource in the management cluster, including your CA
+certs, the ClusterIP service associated with the app and its namespace, and the target namespace for the given extension.
+Once created, the associated service is detected and the hooks supported by the extension are discovered. To verify the
+discovery, you can check the status of the ExtensionConfig. Below is an example of an `ExtensionConfig`:
+
+```yaml
+apiVersion: runtime.cluster.x-k8s.io/v1alpha1
+kind: ExtensionConfig
+metadata:
+  annotations:
+    runtime.cluster.x-k8s.io/inject-ca-from-secret: default/test-runtime-sdk-svc-cert
+  name: test-runtime-sdk-extensionconfig
+spec:
+  clientConfig:
+    service:
+      name: test-runtime-sdk-svc
+      namespace: default # Note: this assumes the test extension gets deployed in the default namespace
+      port: 443
+  namespaceSelector:
+    matchExpressions:
+    - key: kubernetes.io/metadata.name
+      operator: In
+      values:
+      - default # Note: this assumes the test extension is used by Clusters in the default namespace only
+```
+
 ### Settings
 
 Settings can be added to the ExtensionConfig object in the form of a map with string keys and values. These settings are
 sent with each request to hooks registered by that ExtensionConfig. Extension developers can implement behavior in their
-extensions to alter behavior based on these settings. Settings should be well documented by extension developers so that 
+extensions to alter behavior based on these settings. Settings should be well documented by extension developers so that
 ClusterClass authors can understand usage and expected behaviour.
 
 Settings can be provided for individual external patches by providing them in the ClusterClass `.spec.patches[*].external.settings`.
@@ -325,6 +358,8 @@ implementation documentation.
 
 ## Tips & tricks
 
+Make sure to add the ExtensionConfig object to the YAML manifest used to deploy the runtime extensions (see [ExtensionConfig](#extensionconfig) for more details).
+
 After you implemented and deployed a Runtime Extension you can manually test it by sending HTTP requests.
 This can be for example done via kubectl:
 
@@ -347,7 +382,7 @@ curl -X 'POST' 'http://127.0.0.1:8001/api/v1/namespaces/default/services/https:w
 -d '{"apiVersion":"hooks.runtime.cluster.x-k8s.io/v1alpha1","kind":"DiscoveryRequest"}' | jq
 ```
 
-For more details about the API of the Runtime Extensions please see .
+For more details about the API of the Runtime Extensions please see .
 
 For more details on proxy support please see [Proxies in Kubernetes](https://kubernetes.io/docs/concepts/cluster-administration/proxies/).
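+
+To round off the settings mechanism described in the Settings section above, a ClusterClass could pass settings to an
+external patch roughly as follows. This is only a sketch: the handler names (`generate-patches`, `validate-topology`)
+and the `defaultImageRepository` setting key are hypothetical, while the ExtensionConfig name matches the registration
+example above:
+
+```yaml
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: ClusterClass
+metadata:
+  name: docker-clusterclass-v0.1.0
+spec:
+  patches:
+  - name: example-external-patch
+    external:
+      generateExtension: generate-patches.test-runtime-sdk-extensionconfig
+      validateExtension: validate-topology.test-runtime-sdk-extensionconfig
+      settings:
+        defaultImageRepository: registry.example.com  # hypothetical setting key
+```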